xref: /linux/mm/memcontrol.c (revision 2d7f3d1a5866705be2393150e1ffdf67030ab88d)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /* memcontrol.c - Memory Controller
3  *
4  * Copyright IBM Corporation, 2007
5  * Author Balbir Singh <balbir@linux.vnet.ibm.com>
6  *
7  * Copyright 2007 OpenVZ SWsoft Inc
8  * Author: Pavel Emelianov <xemul@openvz.org>
9  *
10  * Memory thresholds
11  * Copyright (C) 2009 Nokia Corporation
12  * Author: Kirill A. Shutemov
13  *
14  * Kernel Memory Controller
15  * Copyright (C) 2012 Parallels Inc. and Google Inc.
16  * Authors: Glauber Costa and Suleiman Souhlal
17  *
18  * Native page reclaim
19  * Charge lifetime sanitation
20  * Lockless page tracking & accounting
21  * Unified hierarchy configuration model
22  * Copyright (C) 2015 Red Hat, Inc., Johannes Weiner
23  *
24  * Per memcg lru locking
25  * Copyright (C) 2020 Alibaba, Inc, Alex Shi
26  */
27 
28 #include <linux/page_counter.h>
29 #include <linux/memcontrol.h>
30 #include <linux/cgroup.h>
31 #include <linux/pagewalk.h>
32 #include <linux/sched/mm.h>
33 #include <linux/shmem_fs.h>
34 #include <linux/hugetlb.h>
35 #include <linux/pagemap.h>
36 #include <linux/vm_event_item.h>
37 #include <linux/smp.h>
38 #include <linux/page-flags.h>
39 #include <linux/backing-dev.h>
40 #include <linux/bit_spinlock.h>
41 #include <linux/rcupdate.h>
42 #include <linux/limits.h>
43 #include <linux/export.h>
44 #include <linux/mutex.h>
45 #include <linux/rbtree.h>
46 #include <linux/slab.h>
47 #include <linux/swap.h>
48 #include <linux/swapops.h>
49 #include <linux/spinlock.h>
50 #include <linux/eventfd.h>
51 #include <linux/poll.h>
52 #include <linux/sort.h>
53 #include <linux/fs.h>
54 #include <linux/seq_file.h>
55 #include <linux/vmpressure.h>
56 #include <linux/memremap.h>
57 #include <linux/mm_inline.h>
58 #include <linux/swap_cgroup.h>
59 #include <linux/cpu.h>
60 #include <linux/oom.h>
61 #include <linux/lockdep.h>
62 #include <linux/file.h>
63 #include <linux/resume_user_mode.h>
64 #include <linux/psi.h>
65 #include <linux/seq_buf.h>
66 #include <linux/sched/isolation.h>
67 #include <linux/kmemleak.h>
68 #include "internal.h"
69 #include <net/sock.h>
70 #include <net/ip.h>
71 #include "slab.h"
72 #include "swap.h"
73 
74 #include <linux/uaccess.h>
75 
76 #include <trace/events/vmscan.h>
77 
78 struct cgroup_subsys memory_cgrp_subsys __read_mostly;
79 EXPORT_SYMBOL(memory_cgrp_subsys);
80 
81 struct mem_cgroup *root_mem_cgroup __read_mostly;
82 
83 /* Active memory cgroup to use from an interrupt context */
84 DEFINE_PER_CPU(struct mem_cgroup *, int_active_memcg);
85 EXPORT_PER_CPU_SYMBOL_GPL(int_active_memcg);
86 
87 /* Socket memory accounting disabled? */
88 static bool cgroup_memory_nosocket __ro_after_init;
89 
90 /* Kernel memory accounting disabled? */
91 static bool cgroup_memory_nokmem __ro_after_init;
92 
93 /* BPF memory accounting disabled? */
94 static bool cgroup_memory_nobpf __ro_after_init;
95 
96 #ifdef CONFIG_CGROUP_WRITEBACK
97 static DECLARE_WAIT_QUEUE_HEAD(memcg_cgwb_frn_waitq);
98 #endif
99 
100 /* Whether legacy memory+swap accounting is active */
101 static bool do_memsw_account(void)
102 {
103 	return !cgroup_subsys_on_dfl(memory_cgrp_subsys);
104 }
105 
106 #define THRESHOLDS_EVENTS_TARGET 128
107 #define SOFTLIMIT_EVENTS_TARGET 1024
108 
109 /*
110  * Cgroups above their limits are maintained in an RB-tree, independent of
111  * their hierarchy representation.
112  */
113 
114 struct mem_cgroup_tree_per_node {
115 	struct rb_root rb_root;
116 	struct rb_node *rb_rightmost;
117 	spinlock_t lock;
118 };
119 
120 struct mem_cgroup_tree {
121 	struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
122 };
123 
124 static struct mem_cgroup_tree soft_limit_tree __read_mostly;
125 
126 /* for OOM */
127 struct mem_cgroup_eventfd_list {
128 	struct list_head list;
129 	struct eventfd_ctx *eventfd;
130 };
131 
132 /*
133  * cgroup_event represents events which userspace wants to receive.
134  */
135 struct mem_cgroup_event {
136 	/*
137 	 * memcg which the event belongs to.
138 	 */
139 	struct mem_cgroup *memcg;
140 	/*
141 	 * eventfd to signal userspace about the event.
142 	 */
143 	struct eventfd_ctx *eventfd;
144 	/*
145 	 * Each of these is stored in a list by the cgroup.
146 	 */
147 	struct list_head list;
148 	/*
149 	 * The register_event() callback will be used to add a new userspace
150 	 * waiter for changes related to this event.  Use eventfd_signal()
151 	 * on the eventfd to send a notification to userspace.
152 	 */
153 	int (*register_event)(struct mem_cgroup *memcg,
154 			      struct eventfd_ctx *eventfd, const char *args);
155 	/*
156 	 * The unregister_event() callback will be called when userspace closes
157 	 * the eventfd or when the cgroup is removed.  This callback must be
158 	 * set if you want to provide notification functionality.
159 	 */
160 	void (*unregister_event)(struct mem_cgroup *memcg,
161 				 struct eventfd_ctx *eventfd);
162 	/*
163 	 * All fields below are needed to unregister the event when
164 	 * userspace closes the eventfd.
165 	 */
166 	poll_table pt;
167 	wait_queue_head_t *wqh;
168 	wait_queue_entry_t wait;
169 	struct work_struct remove;
170 };
171 
172 static void mem_cgroup_threshold(struct mem_cgroup *memcg);
173 static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);
174 
175 /* Stuff for moving charges at task migration. */
176 /*
177  * Types of charges to be moved.
178  */
179 #define MOVE_ANON	0x1U
180 #define MOVE_FILE	0x2U
181 #define MOVE_MASK	(MOVE_ANON | MOVE_FILE)
182 
183 /* "mc" and its members are protected by cgroup_mutex */
184 static struct move_charge_struct {
185 	spinlock_t	  lock; /* for from, to */
186 	struct mm_struct  *mm;
187 	struct mem_cgroup *from;
188 	struct mem_cgroup *to;
189 	unsigned long flags;
190 	unsigned long precharge;
191 	unsigned long moved_charge;
192 	unsigned long moved_swap;
193 	struct task_struct *moving_task;	/* a task moving charges */
194 	wait_queue_head_t waitq;		/* a waitq for other context */
195 } mc = {
196 	.lock = __SPIN_LOCK_UNLOCKED(mc.lock),
197 	.waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
198 };
199 
200 /*
201  * Maximum loops in mem_cgroup_soft_reclaim(), used for soft
202  * limit reclaim to prevent infinite loops, if they ever occur.
203  */
204 #define	MEM_CGROUP_MAX_RECLAIM_LOOPS		100
205 #define	MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS	2
206 
207 /* for encoding cft->private value on file */
208 enum res_type {
209 	_MEM,
210 	_MEMSWAP,
211 	_KMEM,
212 	_TCP,
213 };
214 
215 #define MEMFILE_PRIVATE(x, val)	((x) << 16 | (val))
216 #define MEMFILE_TYPE(val)	((val) >> 16 & 0xffff)
217 #define MEMFILE_ATTR(val)	((val) & 0xffff)
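/*
 * Example (illustrative): a cft->private value built as
 * MEMFILE_PRIVATE(_MEMSWAP, attr) carries the res_type in the high 16 bits
 * and the attribute index in the low 16 bits; MEMFILE_TYPE() and
 * MEMFILE_ATTR() recover the two halves in the file handlers.  "attr" here
 * is a stand-in for whatever attribute constant the file uses.
 */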
218 
219 /*
220  * Iteration constructs for visiting all cgroups (under a tree).  If
221  * loops are exited prematurely (break), mem_cgroup_iter_break() must
222  * be used for reference counting.
223  */
224 #define for_each_mem_cgroup_tree(iter, root)		\
225 	for (iter = mem_cgroup_iter(root, NULL, NULL);	\
226 	     iter != NULL;				\
227 	     iter = mem_cgroup_iter(root, iter, NULL))
228 
229 #define for_each_mem_cgroup(iter)			\
230 	for (iter = mem_cgroup_iter(NULL, NULL, NULL);	\
231 	     iter != NULL;				\
232 	     iter = mem_cgroup_iter(NULL, iter, NULL))
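/*
 * Illustrative usage: mem_cgroup_scan_tasks() below walks a subtree with
 * for_each_mem_cgroup_tree() and, when it stops early, drops the iterator's
 * outstanding reference with mem_cgroup_iter_break().
 */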
233 
234 static inline bool task_is_dying(void)
235 {
236 	return tsk_is_oom_victim(current) || fatal_signal_pending(current) ||
237 		(current->flags & PF_EXITING);
238 }
239 
240 /* Some nice accessors for the vmpressure. */
241 struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
242 {
243 	if (!memcg)
244 		memcg = root_mem_cgroup;
245 	return &memcg->vmpressure;
246 }
247 
248 struct mem_cgroup *vmpressure_to_memcg(struct vmpressure *vmpr)
249 {
250 	return container_of(vmpr, struct mem_cgroup, vmpressure);
251 }
252 
253 #define CURRENT_OBJCG_UPDATE_BIT 0
254 #define CURRENT_OBJCG_UPDATE_FLAG (1UL << CURRENT_OBJCG_UPDATE_BIT)
255 
256 #ifdef CONFIG_MEMCG_KMEM
257 static DEFINE_SPINLOCK(objcg_lock);
258 
259 bool mem_cgroup_kmem_disabled(void)
260 {
261 	return cgroup_memory_nokmem;
262 }
263 
264 static void obj_cgroup_uncharge_pages(struct obj_cgroup *objcg,
265 				      unsigned int nr_pages);
266 
267 static void obj_cgroup_release(struct percpu_ref *ref)
268 {
269 	struct obj_cgroup *objcg = container_of(ref, struct obj_cgroup, refcnt);
270 	unsigned int nr_bytes;
271 	unsigned int nr_pages;
272 	unsigned long flags;
273 
274 	/*
275 	 * At this point all allocated objects are freed, and
276 	 * objcg->nr_charged_bytes can't have an arbitrary byte value.
277 	 * However, it can be PAGE_SIZE or (x * PAGE_SIZE).
278 	 *
279 	 * The following sequence can lead to it:
280 	 * 1) CPU0: objcg == stock->cached_objcg
281 	 * 2) CPU1: we do a small allocation (e.g. 92 bytes),
282 	 *          PAGE_SIZE bytes are charged
283 	 * 3) CPU1: a process from another memcg is allocating something,
284 	 *          the stock is flushed,
285 	 *          objcg->nr_charged_bytes = PAGE_SIZE - 92
286 	 * 4) CPU0: we release this object,
287 	 *          92 bytes are added to stock->nr_bytes
288 	 * 5) CPU0: stock is flushed,
289 	 *          92 bytes are added to objcg->nr_charged_bytes
290 	 *
291 	 * As a result, nr_charged_bytes == PAGE_SIZE.
292 	 * This page will be uncharged in obj_cgroup_release().
293 	 */
294 	nr_bytes = atomic_read(&objcg->nr_charged_bytes);
295 	WARN_ON_ONCE(nr_bytes & (PAGE_SIZE - 1));
296 	nr_pages = nr_bytes >> PAGE_SHIFT;
297 
298 	if (nr_pages)
299 		obj_cgroup_uncharge_pages(objcg, nr_pages);
300 
301 	spin_lock_irqsave(&objcg_lock, flags);
302 	list_del(&objcg->list);
303 	spin_unlock_irqrestore(&objcg_lock, flags);
304 
305 	percpu_ref_exit(ref);
306 	kfree_rcu(objcg, rcu);
307 }
308 
309 static struct obj_cgroup *obj_cgroup_alloc(void)
310 {
311 	struct obj_cgroup *objcg;
312 	int ret;
313 
314 	objcg = kzalloc(sizeof(struct obj_cgroup), GFP_KERNEL);
315 	if (!objcg)
316 		return NULL;
317 
318 	ret = percpu_ref_init(&objcg->refcnt, obj_cgroup_release, 0,
319 			      GFP_KERNEL);
320 	if (ret) {
321 		kfree(objcg);
322 		return NULL;
323 	}
324 	INIT_LIST_HEAD(&objcg->list);
325 	return objcg;
326 }
327 
328 static void memcg_reparent_objcgs(struct mem_cgroup *memcg,
329 				  struct mem_cgroup *parent)
330 {
331 	struct obj_cgroup *objcg, *iter;
332 
333 	objcg = rcu_replace_pointer(memcg->objcg, NULL, true);
334 
335 	spin_lock_irq(&objcg_lock);
336 
337 	/* 1) Ready to reparent active objcg. */
338 	list_add(&objcg->list, &memcg->objcg_list);
339 	/* 2) Reparent active objcg and already reparented objcgs to parent. */
340 	list_for_each_entry(iter, &memcg->objcg_list, list)
341 		WRITE_ONCE(iter->memcg, parent);
342 	/* 3) Move already reparented objcgs to the parent's list */
343 	list_splice(&memcg->objcg_list, &parent->objcg_list);
344 
345 	spin_unlock_irq(&objcg_lock);
346 
347 	percpu_ref_kill(&objcg->refcnt);
348 }
349 
350 /*
351  * A lot of the calls to the cache allocation functions are expected to be
352  * inlined by the compiler. Since the calls to memcg_slab_pre_alloc_hook() are
353  * conditional on this static branch, we have to allow modules that do
354  * kmem_cache_alloc and the like to see this symbol as well.
355  */
356 DEFINE_STATIC_KEY_FALSE(memcg_kmem_online_key);
357 EXPORT_SYMBOL(memcg_kmem_online_key);
358 
359 DEFINE_STATIC_KEY_FALSE(memcg_bpf_enabled_key);
360 EXPORT_SYMBOL(memcg_bpf_enabled_key);
361 #endif
362 
363 /**
364  * mem_cgroup_css_from_folio - css of the memcg associated with a folio
365  * @folio: folio of interest
366  *
367  * If memcg is bound to the default hierarchy, css of the memcg associated
368  * with @folio is returned.  The returned css remains associated with @folio
369  * until it is released.
370  *
371  * If memcg is bound to a traditional hierarchy, the css of root_mem_cgroup
372  * is returned.
373  */
374 struct cgroup_subsys_state *mem_cgroup_css_from_folio(struct folio *folio)
375 {
376 	struct mem_cgroup *memcg = folio_memcg(folio);
377 
378 	if (!memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
379 		memcg = root_mem_cgroup;
380 
381 	return &memcg->css;
382 }
383 
384 /**
385  * page_cgroup_ino - return inode number of the memcg a page is charged to
386  * @page: the page
387  *
388  * Look up the closest online ancestor of the memory cgroup @page is charged to
389  * and return its inode number or 0 if @page is not charged to any cgroup. It
390  * is safe to call this function without holding a reference to @page.
391  *
392  * Note, this function is inherently racy, because there is nothing to prevent
393  * the cgroup inode from getting torn down and potentially reallocated a moment
394  * after page_cgroup_ino() returns, so it should only be used by callers that
395  * do not care (such as procfs interfaces).
396  */
397 ino_t page_cgroup_ino(struct page *page)
398 {
399 	struct mem_cgroup *memcg;
400 	unsigned long ino = 0;
401 
402 	rcu_read_lock();
403 	/* page_folio() is racy here, but the entire function is racy anyway */
404 	memcg = folio_memcg_check(page_folio(page));
405 
406 	while (memcg && !(memcg->css.flags & CSS_ONLINE))
407 		memcg = parent_mem_cgroup(memcg);
408 	if (memcg)
409 		ino = cgroup_ino(memcg->css.cgroup);
410 	rcu_read_unlock();
411 	return ino;
412 }
413 
414 static void __mem_cgroup_insert_exceeded(struct mem_cgroup_per_node *mz,
415 					 struct mem_cgroup_tree_per_node *mctz,
416 					 unsigned long new_usage_in_excess)
417 {
418 	struct rb_node **p = &mctz->rb_root.rb_node;
419 	struct rb_node *parent = NULL;
420 	struct mem_cgroup_per_node *mz_node;
421 	bool rightmost = true;
422 
423 	if (mz->on_tree)
424 		return;
425 
426 	mz->usage_in_excess = new_usage_in_excess;
427 	if (!mz->usage_in_excess)
428 		return;
429 	while (*p) {
430 		parent = *p;
431 		mz_node = rb_entry(parent, struct mem_cgroup_per_node,
432 					tree_node);
433 		if (mz->usage_in_excess < mz_node->usage_in_excess) {
434 			p = &(*p)->rb_left;
435 			rightmost = false;
436 		} else {
437 			p = &(*p)->rb_right;
438 		}
439 	}
440 
441 	if (rightmost)
442 		mctz->rb_rightmost = &mz->tree_node;
443 
444 	rb_link_node(&mz->tree_node, parent, p);
445 	rb_insert_color(&mz->tree_node, &mctz->rb_root);
446 	mz->on_tree = true;
447 }
448 
449 static void __mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
450 					 struct mem_cgroup_tree_per_node *mctz)
451 {
452 	if (!mz->on_tree)
453 		return;
454 
455 	if (&mz->tree_node == mctz->rb_rightmost)
456 		mctz->rb_rightmost = rb_prev(&mz->tree_node);
457 
458 	rb_erase(&mz->tree_node, &mctz->rb_root);
459 	mz->on_tree = false;
460 }
461 
462 static void mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
463 				       struct mem_cgroup_tree_per_node *mctz)
464 {
465 	unsigned long flags;
466 
467 	spin_lock_irqsave(&mctz->lock, flags);
468 	__mem_cgroup_remove_exceeded(mz, mctz);
469 	spin_unlock_irqrestore(&mctz->lock, flags);
470 }
471 
472 static unsigned long soft_limit_excess(struct mem_cgroup *memcg)
473 {
474 	unsigned long nr_pages = page_counter_read(&memcg->memory);
475 	unsigned long soft_limit = READ_ONCE(memcg->soft_limit);
476 	unsigned long excess = 0;
477 
478 	if (nr_pages > soft_limit)
479 		excess = nr_pages - soft_limit;
480 
481 	return excess;
482 }
483 
484 static void mem_cgroup_update_tree(struct mem_cgroup *memcg, int nid)
485 {
486 	unsigned long excess;
487 	struct mem_cgroup_per_node *mz;
488 	struct mem_cgroup_tree_per_node *mctz;
489 
490 	if (lru_gen_enabled()) {
491 		if (soft_limit_excess(memcg))
492 			lru_gen_soft_reclaim(memcg, nid);
493 		return;
494 	}
495 
496 	mctz = soft_limit_tree.rb_tree_per_node[nid];
497 	if (!mctz)
498 		return;
499 	/*
500 	 * Necessary to update all ancestors when the hierarchy is used,
501 	 * because their event counters are not touched.
502 	 */
503 	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
504 		mz = memcg->nodeinfo[nid];
505 		excess = soft_limit_excess(memcg);
506 		/*
507 		 * We have to update the tree if mz is on the RB-tree or
508 		 * the memcg is over its soft limit.
509 		 */
510 		if (excess || mz->on_tree) {
511 			unsigned long flags;
512 
513 			spin_lock_irqsave(&mctz->lock, flags);
514 			/* if on-tree, remove it */
515 			if (mz->on_tree)
516 				__mem_cgroup_remove_exceeded(mz, mctz);
517 			/*
518 			 * Insert again. mz->usage_in_excess will be updated.
519 			 * If excess is 0, no tree ops.
520 			 */
521 			__mem_cgroup_insert_exceeded(mz, mctz, excess);
522 			spin_unlock_irqrestore(&mctz->lock, flags);
523 		}
524 	}
525 }
526 
527 static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
528 {
529 	struct mem_cgroup_tree_per_node *mctz;
530 	struct mem_cgroup_per_node *mz;
531 	int nid;
532 
533 	for_each_node(nid) {
534 		mz = memcg->nodeinfo[nid];
535 		mctz = soft_limit_tree.rb_tree_per_node[nid];
536 		if (mctz)
537 			mem_cgroup_remove_exceeded(mz, mctz);
538 	}
539 }
540 
541 static struct mem_cgroup_per_node *
542 __mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
543 {
544 	struct mem_cgroup_per_node *mz;
545 
546 retry:
547 	mz = NULL;
548 	if (!mctz->rb_rightmost)
549 		goto done;		/* Nothing to reclaim from */
550 
551 	mz = rb_entry(mctz->rb_rightmost,
552 		      struct mem_cgroup_per_node, tree_node);
553 	/*
554 	 * Remove the node now, but someone else can add it back;
555 	 * we will add it back at the end of reclaim to its correct
556 	 * position in the tree.
557 	 */
558 	__mem_cgroup_remove_exceeded(mz, mctz);
559 	if (!soft_limit_excess(mz->memcg) ||
560 	    !css_tryget(&mz->memcg->css))
561 		goto retry;
562 done:
563 	return mz;
564 }
565 
566 static struct mem_cgroup_per_node *
567 mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
568 {
569 	struct mem_cgroup_per_node *mz;
570 
571 	spin_lock_irq(&mctz->lock);
572 	mz = __mem_cgroup_largest_soft_limit_node(mctz);
573 	spin_unlock_irq(&mctz->lock);
574 	return mz;
575 }
576 
577 /* Subset of vm_event_item to report for memcg event stats */
578 static const unsigned int memcg_vm_event_stat[] = {
579 	PGPGIN,
580 	PGPGOUT,
581 	PGSCAN_KSWAPD,
582 	PGSCAN_DIRECT,
583 	PGSCAN_KHUGEPAGED,
584 	PGSTEAL_KSWAPD,
585 	PGSTEAL_DIRECT,
586 	PGSTEAL_KHUGEPAGED,
587 	PGFAULT,
588 	PGMAJFAULT,
589 	PGREFILL,
590 	PGACTIVATE,
591 	PGDEACTIVATE,
592 	PGLAZYFREE,
593 	PGLAZYFREED,
594 #if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
595 	ZSWPIN,
596 	ZSWPOUT,
597 	ZSWPWB,
598 #endif
599 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
600 	THP_FAULT_ALLOC,
601 	THP_COLLAPSE_ALLOC,
602 	THP_SWPOUT,
603 	THP_SWPOUT_FALLBACK,
604 #endif
605 };
606 
607 #define NR_MEMCG_EVENTS ARRAY_SIZE(memcg_vm_event_stat)
608 static int mem_cgroup_events_index[NR_VM_EVENT_ITEMS] __read_mostly;
609 
610 static void init_memcg_events(void)
611 {
612 	int i;
613 
614 	for (i = 0; i < NR_MEMCG_EVENTS; ++i)
615 		mem_cgroup_events_index[memcg_vm_event_stat[i]] = i + 1;
616 }
617 
618 static inline int memcg_events_index(enum vm_event_item idx)
619 {
620 	return mem_cgroup_events_index[idx] - 1;
621 }
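/*
 * Note: init_memcg_events() stores index + 1, so the zero-initialized
 * entries for events that are not in memcg_vm_event_stat[] map to -1 here,
 * which callers treat as "not a memcg event".
 */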
622 
623 struct memcg_vmstats_percpu {
624 	/* Stats updates since the last flush */
625 	unsigned int			stats_updates;
626 
627 	/* Cached pointers for fast iteration in memcg_rstat_updated() */
628 	struct memcg_vmstats_percpu	*parent;
629 	struct memcg_vmstats		*vmstats;
630 
631 	/* The above should fit in a single cacheline for memcg_rstat_updated() */
632 
633 	/* Local (CPU and cgroup) page state & events */
634 	long			state[MEMCG_NR_STAT];
635 	unsigned long		events[NR_MEMCG_EVENTS];
636 
637 	/* Delta calculation for lockless upward propagation */
638 	long			state_prev[MEMCG_NR_STAT];
639 	unsigned long		events_prev[NR_MEMCG_EVENTS];
640 
641 	/* Cgroup1: threshold notifications & softlimit tree updates */
642 	unsigned long		nr_page_events;
643 	unsigned long		targets[MEM_CGROUP_NTARGETS];
644 } ____cacheline_aligned;
645 
646 struct memcg_vmstats {
647 	/* Aggregated (CPU and subtree) page state & events */
648 	long			state[MEMCG_NR_STAT];
649 	unsigned long		events[NR_MEMCG_EVENTS];
650 
651 	/* Non-hierarchical (CPU aggregated) page state & events */
652 	long			state_local[MEMCG_NR_STAT];
653 	unsigned long		events_local[NR_MEMCG_EVENTS];
654 
655 	/* Pending child counts during tree propagation */
656 	long			state_pending[MEMCG_NR_STAT];
657 	unsigned long		events_pending[NR_MEMCG_EVENTS];
658 
659 	/* Stats updates since the last flush */
660 	atomic64_t		stats_updates;
661 };
662 
663 /*
664  * memcg and lruvec stats flushing
665  *
666  * Many codepaths leading to stats updates or reads are performance sensitive,
667  * and adding stats flushing in such codepaths is not desirable. So, to optimize
668  * flushing, the kernel does the following:
669  *
670  * 1) Periodically and asynchronously flush the stats every 2 seconds so that
671  *    the rstat update tree does not grow unbounded.
672  *
673  * 2) Flush the stats synchronously on the reader side only when there are more
674  *    than (MEMCG_CHARGE_BATCH * nr_cpus) update events. This can leave the
675  *    stats out of sync by at most (MEMCG_CHARGE_BATCH * nr_cpus) update events,
676  *    but only for up to 2 seconds due to (1).
677  */
678 static void flush_memcg_stats_dwork(struct work_struct *w);
679 static DECLARE_DEFERRABLE_WORK(stats_flush_dwork, flush_memcg_stats_dwork);
680 static u64 flush_last_time;
681 
682 #define FLUSH_TIME (2UL*HZ)
683 
684 /*
685  * Accessors to ensure that preemption is disabled on PREEMPT_RT, because on
686  * PREEMPT_RT an acquired spinlock_t does not imply disabled preemption. These
687  * functions are never used in hardirq context on PREEMPT_RT, and therefore
688  * disabling preemption is sufficient.
689  */
690 static void memcg_stats_lock(void)
691 {
692 	preempt_disable_nested();
693 	VM_WARN_ON_IRQS_ENABLED();
694 }
695 
696 static void __memcg_stats_lock(void)
697 {
698 	preempt_disable_nested();
699 }
700 
701 static void memcg_stats_unlock(void)
702 {
703 	preempt_enable_nested();
704 }
705 
706 
707 static bool memcg_vmstats_needs_flush(struct memcg_vmstats *vmstats)
708 {
709 	return atomic64_read(&vmstats->stats_updates) >
710 		MEMCG_CHARGE_BATCH * num_online_cpus();
711 }
712 
713 static inline void memcg_rstat_updated(struct mem_cgroup *memcg, int val)
714 {
715 	struct memcg_vmstats_percpu *statc;
716 	int cpu = smp_processor_id();
717 
718 	if (!val)
719 		return;
720 
721 	cgroup_rstat_updated(memcg->css.cgroup, cpu);
722 	statc = this_cpu_ptr(memcg->vmstats_percpu);
723 	for (; statc; statc = statc->parent) {
724 		statc->stats_updates += abs(val);
725 		if (statc->stats_updates < MEMCG_CHARGE_BATCH)
726 			continue;
727 
728 		/*
729 		 * If @memcg is already flush-able, increasing stats_updates is
730 		 * redundant. Avoid the overhead of the atomic update.
731 		 */
732 		if (!memcg_vmstats_needs_flush(statc->vmstats))
733 			atomic64_add(statc->stats_updates,
734 				     &statc->vmstats->stats_updates);
735 		statc->stats_updates = 0;
736 	}
737 }
738 
739 static void do_flush_stats(struct mem_cgroup *memcg)
740 {
741 	if (mem_cgroup_is_root(memcg))
742 		WRITE_ONCE(flush_last_time, jiffies_64);
743 
744 	cgroup_rstat_flush(memcg->css.cgroup);
745 }
746 
747 /*
748  * mem_cgroup_flush_stats - flush the stats of a memory cgroup subtree
749  * @memcg: root of the subtree to flush
750  *
751  * Flushing is serialized by the underlying global rstat lock. There is also a
752  * minimum amount of work to be done even if there are no stat updates to flush.
753  * Hence, we only flush the stats if the updates delta exceeds a threshold. This
754  * avoids unnecessary work and contention on the underlying lock.
755  */
756 void mem_cgroup_flush_stats(struct mem_cgroup *memcg)
757 {
758 	if (mem_cgroup_disabled())
759 		return;
760 
761 	if (!memcg)
762 		memcg = root_mem_cgroup;
763 
764 	if (memcg_vmstats_needs_flush(memcg->vmstats))
765 		do_flush_stats(memcg);
766 }
767 
768 void mem_cgroup_flush_stats_ratelimited(struct mem_cgroup *memcg)
769 {
770 	/* Only flush if the periodic flusher is one full cycle late */
771 	if (time_after64(jiffies_64, READ_ONCE(flush_last_time) + 2*FLUSH_TIME))
772 		mem_cgroup_flush_stats(memcg);
773 }
774 
775 static void flush_memcg_stats_dwork(struct work_struct *w)
776 {
777 	/*
778 	 * Deliberately ignore memcg_vmstats_needs_flush() here so that flushing
779 	 * in latency-sensitive paths is as cheap as possible.
780 	 */
781 	do_flush_stats(root_mem_cgroup);
782 	queue_delayed_work(system_unbound_wq, &stats_flush_dwork, FLUSH_TIME);
783 }
784 
785 unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
786 {
787 	long x = READ_ONCE(memcg->vmstats->state[idx]);
788 #ifdef CONFIG_SMP
789 	if (x < 0)
790 		x = 0;
791 #endif
792 	return x;
793 }
794 
795 static int memcg_page_state_unit(int item);
796 
797 /*
798  * Normalize the value passed into memcg_rstat_updated() to be in pages. Round
799  * up non-zero sub-page updates to 1 page, since zero-sized updates are ignored.
800  */
801 static int memcg_state_val_in_pages(int idx, int val)
802 {
803 	int unit = memcg_page_state_unit(idx);
804 
805 	if (!val || unit == PAGE_SIZE)
806 		return val;
807 	else
808 		return max(val * unit / PAGE_SIZE, 1UL);
809 }
810 
811 /**
812  * __mod_memcg_state - update cgroup memory statistics
813  * @memcg: the memory cgroup
814  * @idx: the stat item - can be enum memcg_stat_item or enum node_stat_item
815  * @val: delta to add to the counter, can be negative
816  */
817 void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val)
818 {
819 	if (mem_cgroup_disabled())
820 		return;
821 
822 	__this_cpu_add(memcg->vmstats_percpu->state[idx], val);
823 	memcg_rstat_updated(memcg, memcg_state_val_in_pages(idx, val));
824 }
825 
826 /* idx can be of type enum memcg_stat_item or node_stat_item. */
827 static unsigned long memcg_page_state_local(struct mem_cgroup *memcg, int idx)
828 {
829 	long x = READ_ONCE(memcg->vmstats->state_local[idx]);
830 
831 #ifdef CONFIG_SMP
832 	if (x < 0)
833 		x = 0;
834 #endif
835 	return x;
836 }
837 
838 void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
839 			      int val)
840 {
841 	struct mem_cgroup_per_node *pn;
842 	struct mem_cgroup *memcg;
843 
844 	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
845 	memcg = pn->memcg;
846 
847 	/*
848 	 * Callers from rmap rely on disabled preemption because they never
849 	 * update their counters from interrupt context. For those counters we
850 	 * check that the update is never performed from an interrupt context,
851 	 * while other callers need to have interrupts disabled.
852 	 */
853 	__memcg_stats_lock();
854 	if (IS_ENABLED(CONFIG_DEBUG_VM)) {
855 		switch (idx) {
856 		case NR_ANON_MAPPED:
857 		case NR_FILE_MAPPED:
858 		case NR_ANON_THPS:
859 		case NR_SHMEM_PMDMAPPED:
860 		case NR_FILE_PMDMAPPED:
861 			WARN_ON_ONCE(!in_task());
862 			break;
863 		default:
864 			VM_WARN_ON_IRQS_ENABLED();
865 		}
866 	}
867 
868 	/* Update memcg */
869 	__this_cpu_add(memcg->vmstats_percpu->state[idx], val);
870 
871 	/* Update lruvec */
872 	__this_cpu_add(pn->lruvec_stats_percpu->state[idx], val);
873 
874 	memcg_rstat_updated(memcg, memcg_state_val_in_pages(idx, val));
875 	memcg_stats_unlock();
876 }
877 
878 /**
879  * __mod_lruvec_state - update lruvec memory statistics
880  * @lruvec: the lruvec
881  * @idx: the stat item
882  * @val: delta to add to the counter, can be negative
883  *
884  * The lruvec is the intersection of the NUMA node and a cgroup. This
885  * function updates all three counters that are affected by a
886  * change of state at this level: per-node, per-cgroup, per-lruvec.
887  */
888 void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
889 			int val)
890 {
891 	/* Update node */
892 	__mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
893 
894 	/* Update memcg and lruvec */
895 	if (!mem_cgroup_disabled())
896 		__mod_memcg_lruvec_state(lruvec, idx, val);
897 }
898 
899 void __lruvec_stat_mod_folio(struct folio *folio, enum node_stat_item idx,
900 			     int val)
901 {
902 	struct mem_cgroup *memcg;
903 	pg_data_t *pgdat = folio_pgdat(folio);
904 	struct lruvec *lruvec;
905 
906 	rcu_read_lock();
907 	memcg = folio_memcg(folio);
908 	/* Untracked pages have no memcg, no lruvec. Update only the node */
909 	if (!memcg) {
910 		rcu_read_unlock();
911 		__mod_node_page_state(pgdat, idx, val);
912 		return;
913 	}
914 
915 	lruvec = mem_cgroup_lruvec(memcg, pgdat);
916 	__mod_lruvec_state(lruvec, idx, val);
917 	rcu_read_unlock();
918 }
919 EXPORT_SYMBOL(__lruvec_stat_mod_folio);
920 
921 void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val)
922 {
923 	pg_data_t *pgdat = page_pgdat(virt_to_page(p));
924 	struct mem_cgroup *memcg;
925 	struct lruvec *lruvec;
926 
927 	rcu_read_lock();
928 	memcg = mem_cgroup_from_slab_obj(p);
929 
930 	/*
931 	 * Untracked pages have no memcg and no lruvec, so update only the
932 	 * node. If slab objects are reparented to the root memcg, then when
933 	 * we free a slab object we still need to update the per-memcg
934 	 * vmstats to keep them correct for the root memcg.
935 	 */
936 	if (!memcg) {
937 		__mod_node_page_state(pgdat, idx, val);
938 	} else {
939 		lruvec = mem_cgroup_lruvec(memcg, pgdat);
940 		__mod_lruvec_state(lruvec, idx, val);
941 	}
942 	rcu_read_unlock();
943 }
944 
945 /**
946  * __count_memcg_events - account VM events in a cgroup
947  * @memcg: the memory cgroup
948  * @idx: the event item
949  * @count: the number of events that occurred
950  */
951 void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
952 			  unsigned long count)
953 {
954 	int index = memcg_events_index(idx);
955 
956 	if (mem_cgroup_disabled() || index < 0)
957 		return;
958 
959 	memcg_stats_lock();
960 	__this_cpu_add(memcg->vmstats_percpu->events[index], count);
961 	memcg_rstat_updated(memcg, count);
962 	memcg_stats_unlock();
963 }
964 
965 static unsigned long memcg_events(struct mem_cgroup *memcg, int event)
966 {
967 	int index = memcg_events_index(event);
968 
969 	if (index < 0)
970 		return 0;
971 	return READ_ONCE(memcg->vmstats->events[index]);
972 }
973 
974 static unsigned long memcg_events_local(struct mem_cgroup *memcg, int event)
975 {
976 	int index = memcg_events_index(event);
977 
978 	if (index < 0)
979 		return 0;
980 
981 	return READ_ONCE(memcg->vmstats->events_local[index]);
982 }
983 
984 static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
985 					 int nr_pages)
986 {
987 	/* pagein of a big page is one event, so ignore the page size */
988 	if (nr_pages > 0)
989 		__count_memcg_events(memcg, PGPGIN, 1);
990 	else {
991 		__count_memcg_events(memcg, PGPGOUT, 1);
992 		nr_pages = -nr_pages; /* for event */
993 	}
994 
995 	__this_cpu_add(memcg->vmstats_percpu->nr_page_events, nr_pages);
996 }
997 
998 static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
999 				       enum mem_cgroup_events_target target)
1000 {
1001 	unsigned long val, next;
1002 
1003 	val = __this_cpu_read(memcg->vmstats_percpu->nr_page_events);
1004 	next = __this_cpu_read(memcg->vmstats_percpu->targets[target]);
1005 	/* from time_after() in jiffies.h */
1006 	if ((long)(next - val) < 0) {
1007 		switch (target) {
1008 		case MEM_CGROUP_TARGET_THRESH:
1009 			next = val + THRESHOLDS_EVENTS_TARGET;
1010 			break;
1011 		case MEM_CGROUP_TARGET_SOFTLIMIT:
1012 			next = val + SOFTLIMIT_EVENTS_TARGET;
1013 			break;
1014 		default:
1015 			break;
1016 		}
1017 		__this_cpu_write(memcg->vmstats_percpu->targets[target], next);
1018 		return true;
1019 	}
1020 	return false;
1021 }
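/*
 * The signed "(long)(next - val) < 0" check above is the same wrap-safe
 * comparison used by time_after(): it stays correct even if the per-cpu
 * nr_page_events counter wraps around, e.g. a difference of -1 still means
 * the target has been passed.
 */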
1022 
1023 /*
1024  * Check events in order: threshold notifications first, then the
1025  * soft limit tree update.
1026  */
1027 static void memcg_check_events(struct mem_cgroup *memcg, int nid)
1028 {
1029 	if (IS_ENABLED(CONFIG_PREEMPT_RT))
1030 		return;
1031 
1032 	/* threshold event is triggered in finer grain than soft limit */
1033 	if (unlikely(mem_cgroup_event_ratelimit(memcg,
1034 						MEM_CGROUP_TARGET_THRESH))) {
1035 		bool do_softlimit;
1036 
1037 		do_softlimit = mem_cgroup_event_ratelimit(memcg,
1038 						MEM_CGROUP_TARGET_SOFTLIMIT);
1039 		mem_cgroup_threshold(memcg);
1040 		if (unlikely(do_softlimit))
1041 			mem_cgroup_update_tree(memcg, nid);
1042 	}
1043 }
1044 
1045 struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
1046 {
1047 	/*
1048 	 * mm_update_next_owner() may clear mm->owner to NULL
1049 	 * if it races with swapoff, page migration, etc.
1050 	 * So this can be called with p == NULL.
1051 	 */
1052 	if (unlikely(!p))
1053 		return NULL;
1054 
1055 	return mem_cgroup_from_css(task_css(p, memory_cgrp_id));
1056 }
1057 EXPORT_SYMBOL(mem_cgroup_from_task);
1058 
1059 static __always_inline struct mem_cgroup *active_memcg(void)
1060 {
1061 	if (!in_task())
1062 		return this_cpu_read(int_active_memcg);
1063 	else
1064 		return current->active_memcg;
1065 }
1066 
1067 /**
1068  * get_mem_cgroup_from_mm: Obtain a reference on given mm_struct's memcg.
1069  * @mm: mm from which memcg should be extracted. It can be NULL.
1070  *
1071  * Obtain a reference on mm->memcg and return it if successful. If mm
1072  * is NULL, then the memcg is chosen as follows:
1073  * 1) The active memcg, if set.
1074  * 2) current->mm->memcg, if available
1075  * 3) root memcg
1076  * If mem_cgroup is disabled, NULL is returned.
1077  */
1078 struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
1079 {
1080 	struct mem_cgroup *memcg;
1081 
1082 	if (mem_cgroup_disabled())
1083 		return NULL;
1084 
1085 	/*
1086 	 * Page cache insertions can happen without an
1087 	 * actual mm context, e.g. during disk probing
1088 	 * on boot, loopback IO, acct() writes etc.
1089 	 *
1090 	 * No need to css_get on root memcg as the reference
1091 	 * counting is disabled on the root level in the
1092 	 * cgroup core. See CSS_NO_REF.
1093 	 */
1094 	if (unlikely(!mm)) {
1095 		memcg = active_memcg();
1096 		if (unlikely(memcg)) {
1097 			/* remote memcg must hold a ref */
1098 			css_get(&memcg->css);
1099 			return memcg;
1100 		}
1101 		mm = current->mm;
1102 		if (unlikely(!mm))
1103 			return root_mem_cgroup;
1104 	}
1105 
1106 	rcu_read_lock();
1107 	do {
1108 		memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
1109 		if (unlikely(!memcg))
1110 			memcg = root_mem_cgroup;
1111 	} while (!css_tryget(&memcg->css));
1112 	rcu_read_unlock();
1113 	return memcg;
1114 }
1115 EXPORT_SYMBOL(get_mem_cgroup_from_mm);
1116 
1117 /**
1118  * get_mem_cgroup_from_current - Obtain a reference on current task's memcg.
1119  */
1120 struct mem_cgroup *get_mem_cgroup_from_current(void)
1121 {
1122 	struct mem_cgroup *memcg;
1123 
1124 	if (mem_cgroup_disabled())
1125 		return NULL;
1126 
1127 again:
1128 	rcu_read_lock();
1129 	memcg = mem_cgroup_from_task(current);
1130 	if (!css_tryget(&memcg->css)) {
1131 		rcu_read_unlock();
1132 		goto again;
1133 	}
1134 	rcu_read_unlock();
1135 	return memcg;
1136 }
1137 
1138 /**
1139  * mem_cgroup_iter - iterate over memory cgroup hierarchy
1140  * @root: hierarchy root
1141  * @prev: previously returned memcg, NULL on first invocation
1142  * @reclaim: cookie for shared reclaim walks, NULL for full walks
1143  *
1144  * Returns references to children of the hierarchy below @root, or
1145  * @root itself, or %NULL after a full round-trip.
1146  *
1147  * Caller must pass the return value in @prev on subsequent
1148  * invocations for reference counting, or use mem_cgroup_iter_break()
1149  * to cancel a hierarchy walk before the round-trip is complete.
1150  *
1151  * Reclaimers can specify a node in @reclaim to divide up the memcgs
1152  * in the hierarchy among all concurrent reclaimers operating on the
1153  * same node.
1154  */
1155 struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
1156 				   struct mem_cgroup *prev,
1157 				   struct mem_cgroup_reclaim_cookie *reclaim)
1158 {
1159 	struct mem_cgroup_reclaim_iter *iter;
1160 	struct cgroup_subsys_state *css = NULL;
1161 	struct mem_cgroup *memcg = NULL;
1162 	struct mem_cgroup *pos = NULL;
1163 
1164 	if (mem_cgroup_disabled())
1165 		return NULL;
1166 
1167 	if (!root)
1168 		root = root_mem_cgroup;
1169 
1170 	rcu_read_lock();
1171 
1172 	if (reclaim) {
1173 		struct mem_cgroup_per_node *mz;
1174 
1175 		mz = root->nodeinfo[reclaim->pgdat->node_id];
1176 		iter = &mz->iter;
1177 
1178 		/*
1179 		 * On start, join the current reclaim iteration cycle.
1180 		 * Exit when a concurrent walker completes it.
1181 		 */
1182 		if (!prev)
1183 			reclaim->generation = iter->generation;
1184 		else if (reclaim->generation != iter->generation)
1185 			goto out_unlock;
1186 
1187 		while (1) {
1188 			pos = READ_ONCE(iter->position);
1189 			if (!pos || css_tryget(&pos->css))
1190 				break;
1191 			/*
1192 			 * css reference reached zero, so iter->position will
1193 			 * be cleared by ->css_released. However, we should not
1194 			 * rely on this happening soon, because ->css_released
1195 			 * is called from a work queue, and by busy-waiting we
1196 			 * might block it. So we clear iter->position right
1197 			 * away.
1198 			 */
1199 			(void)cmpxchg(&iter->position, pos, NULL);
1200 		}
1201 	} else if (prev) {
1202 		pos = prev;
1203 	}
1204 
1205 	if (pos)
1206 		css = &pos->css;
1207 
1208 	for (;;) {
1209 		css = css_next_descendant_pre(css, &root->css);
1210 		if (!css) {
1211 			/*
1212 			 * Reclaimers share the hierarchy walk, and a
1213 			 * new one might jump in right at the end of
1214 			 * the hierarchy - make sure they see at least
1215 			 * one group and restart from the beginning.
1216 			 */
1217 			if (!prev)
1218 				continue;
1219 			break;
1220 		}
1221 
1222 		/*
1223 		 * Verify the css and acquire a reference.  The root
1224 		 * is provided by the caller, so we know it's alive
1225 		 * and kicking, and don't take an extra reference.
1226 		 */
1227 		if (css == &root->css || css_tryget(css)) {
1228 			memcg = mem_cgroup_from_css(css);
1229 			break;
1230 		}
1231 	}
1232 
1233 	if (reclaim) {
1234 		/*
1235 		 * The position could have already been updated by a competing
1236 		 * thread, so check that the value hasn't changed since we read
1237 		 * it to avoid reclaiming from the same cgroup twice.
1238 		 */
1239 		(void)cmpxchg(&iter->position, pos, memcg);
1240 
1241 		if (pos)
1242 			css_put(&pos->css);
1243 
1244 		if (!memcg)
1245 			iter->generation++;
1246 	}
1247 
1248 out_unlock:
1249 	rcu_read_unlock();
1250 	if (prev && prev != root)
1251 		css_put(&prev->css);
1252 
1253 	return memcg;
1254 }
1255 
1256 /**
1257  * mem_cgroup_iter_break - abort a hierarchy walk prematurely
1258  * @root: hierarchy root
1259  * @prev: last visited hierarchy member as returned by mem_cgroup_iter()
1260  */
1261 void mem_cgroup_iter_break(struct mem_cgroup *root,
1262 			   struct mem_cgroup *prev)
1263 {
1264 	if (!root)
1265 		root = root_mem_cgroup;
1266 	if (prev && prev != root)
1267 		css_put(&prev->css);
1268 }
1269 
1270 static void __invalidate_reclaim_iterators(struct mem_cgroup *from,
1271 					struct mem_cgroup *dead_memcg)
1272 {
1273 	struct mem_cgroup_reclaim_iter *iter;
1274 	struct mem_cgroup_per_node *mz;
1275 	int nid;
1276 
1277 	for_each_node(nid) {
1278 		mz = from->nodeinfo[nid];
1279 		iter = &mz->iter;
1280 		cmpxchg(&iter->position, dead_memcg, NULL);
1281 	}
1282 }
1283 
1284 static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
1285 {
1286 	struct mem_cgroup *memcg = dead_memcg;
1287 	struct mem_cgroup *last;
1288 
1289 	do {
1290 		__invalidate_reclaim_iterators(memcg, dead_memcg);
1291 		last = memcg;
1292 	} while ((memcg = parent_mem_cgroup(memcg)));
1293 
1294 	/*
1295 	 * When cgroup1 non-hierarchy mode is used,
1296 	 * parent_mem_cgroup() does not walk all the way up to the
1297 	 * cgroup root (root_mem_cgroup). So we have to handle
1298 	 * dead_memcg from cgroup root separately.
1299 	 */
1300 	if (!mem_cgroup_is_root(last))
1301 		__invalidate_reclaim_iterators(root_mem_cgroup,
1302 						dead_memcg);
1303 }
1304 
1305 /**
1306  * mem_cgroup_scan_tasks - iterate over tasks of a memory cgroup hierarchy
1307  * @memcg: hierarchy root
1308  * @fn: function to call for each task
1309  * @arg: argument passed to @fn
1310  *
1311  * This function iterates over tasks attached to @memcg or to any of its
1312  * descendants and calls @fn for each task. If @fn returns a non-zero
1313  * value, the function breaks the iteration loop. Otherwise, it will iterate
1314  * over all tasks and return 0.
1315  *
1316  * This function must not be called for the root memory cgroup.
1317  */
1318 void mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
1319 			   int (*fn)(struct task_struct *, void *), void *arg)
1320 {
1321 	struct mem_cgroup *iter;
1322 	int ret = 0;
1323 
1324 	BUG_ON(mem_cgroup_is_root(memcg));
1325 
1326 	for_each_mem_cgroup_tree(iter, memcg) {
1327 		struct css_task_iter it;
1328 		struct task_struct *task;
1329 
1330 		css_task_iter_start(&iter->css, CSS_TASK_ITER_PROCS, &it);
1331 		while (!ret && (task = css_task_iter_next(&it)))
1332 			ret = fn(task, arg);
1333 		css_task_iter_end(&it);
1334 		if (ret) {
1335 			mem_cgroup_iter_break(memcg, iter);
1336 			break;
1337 		}
1338 	}
1339 }
1340 
1341 #ifdef CONFIG_DEBUG_VM
1342 void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio)
1343 {
1344 	struct mem_cgroup *memcg;
1345 
1346 	if (mem_cgroup_disabled())
1347 		return;
1348 
1349 	memcg = folio_memcg(folio);
1350 
1351 	if (!memcg)
1352 		VM_BUG_ON_FOLIO(!mem_cgroup_is_root(lruvec_memcg(lruvec)), folio);
1353 	else
1354 		VM_BUG_ON_FOLIO(lruvec_memcg(lruvec) != memcg, folio);
1355 }
1356 #endif
1357 
1358 /**
1359  * folio_lruvec_lock - Lock the lruvec for a folio.
1360  * @folio: Pointer to the folio.
1361  *
1362  * These functions are safe to use under any of the following conditions:
1363  * - folio locked
1364  * - folio_test_lru false
1365  * - folio_memcg_lock()
1366  * - folio frozen (refcount of 0)
1367  *
1368  * Return: The lruvec this folio is on with its lock held.
1369  */
1370 struct lruvec *folio_lruvec_lock(struct folio *folio)
1371 {
1372 	struct lruvec *lruvec = folio_lruvec(folio);
1373 
1374 	spin_lock(&lruvec->lru_lock);
1375 	lruvec_memcg_debug(lruvec, folio);
1376 
1377 	return lruvec;
1378 }
1379 
1380 /**
1381  * folio_lruvec_lock_irq - Lock the lruvec for a folio.
1382  * @folio: Pointer to the folio.
1383  *
1384  * These functions are safe to use under any of the following conditions:
1385  * - folio locked
1386  * - folio_test_lru false
1387  * - folio_memcg_lock()
1388  * - folio frozen (refcount of 0)
1389  *
1390  * Return: The lruvec this folio is on with its lock held and interrupts
1391  * disabled.
1392  */
1393 struct lruvec *folio_lruvec_lock_irq(struct folio *folio)
1394 {
1395 	struct lruvec *lruvec = folio_lruvec(folio);
1396 
1397 	spin_lock_irq(&lruvec->lru_lock);
1398 	lruvec_memcg_debug(lruvec, folio);
1399 
1400 	return lruvec;
1401 }
1402 
1403 /**
1404  * folio_lruvec_lock_irqsave - Lock the lruvec for a folio.
1405  * @folio: Pointer to the folio.
1406  * @flags: Pointer to irqsave flags.
1407  *
1408  * These functions are safe to use under any of the following conditions:
1409  * - folio locked
1410  * - folio_test_lru false
1411  * - folio_memcg_lock()
1412  * - folio frozen (refcount of 0)
1413  *
1414  * Return: The lruvec this folio is on with its lock held and interrupts
1415  * disabled.
1416  */
1417 struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio,
1418 		unsigned long *flags)
1419 {
1420 	struct lruvec *lruvec = folio_lruvec(folio);
1421 
1422 	spin_lock_irqsave(&lruvec->lru_lock, *flags);
1423 	lruvec_memcg_debug(lruvec, folio);
1424 
1425 	return lruvec;
1426 }
1427 
1428 /**
1429  * mem_cgroup_update_lru_size - account for adding or removing an lru page
1430  * @lruvec: mem_cgroup per node lru vector
1431  * @lru: index of lru list the page is sitting on
1432  * @zid: zone id of the accounted pages
1433  * @nr_pages: positive when adding or negative when removing
1434  *
1435  * This function must be called under lru_lock, just before a page is added
1436  * to or just after a page is removed from an lru list.
1437  */
1438 void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
1439 				int zid, int nr_pages)
1440 {
1441 	struct mem_cgroup_per_node *mz;
1442 	unsigned long *lru_size;
1443 	long size;
1444 
1445 	if (mem_cgroup_disabled())
1446 		return;
1447 
1448 	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
1449 	lru_size = &mz->lru_zone_size[zid][lru];
1450 
1451 	if (nr_pages < 0)
1452 		*lru_size += nr_pages;
1453 
1454 	size = *lru_size;
1455 	if (WARN_ONCE(size < 0,
1456 		"%s(%p, %d, %d): lru_size %ld\n",
1457 		__func__, lruvec, lru, nr_pages, size)) {
1458 		VM_BUG_ON(1);
1459 		*lru_size = 0;
1460 	}
1461 
1462 	if (nr_pages > 0)
1463 		*lru_size += nr_pages;
1464 }
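/*
 * Note on the ordering above: removals (nr_pages < 0) are applied before the
 * sanity check so that an underflow is caught by WARN_ONCE(), while additions
 * are applied only after the check, keeping *lru_size consistent if the
 * warning path resets it to 0.
 */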
1465 
1466 /**
1467  * mem_cgroup_margin - calculate chargeable space of a memory cgroup
1468  * @memcg: the memory cgroup
1469  *
1470  * Returns the maximum amount of memory @memcg can be charged with, in
1471  * pages.
1472  */
1473 static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
1474 {
1475 	unsigned long margin = 0;
1476 	unsigned long count;
1477 	unsigned long limit;
1478 
1479 	count = page_counter_read(&memcg->memory);
1480 	limit = READ_ONCE(memcg->memory.max);
1481 	if (count < limit)
1482 		margin = limit - count;
1483 
1484 	if (do_memsw_account()) {
1485 		count = page_counter_read(&memcg->memsw);
1486 		limit = READ_ONCE(memcg->memsw.max);
1487 		if (count < limit)
1488 			margin = min(margin, limit - count);
1489 		else
1490 			margin = 0;
1491 	}
1492 
1493 	return margin;
1494 }
1495 
1496 /*
1497  * A routine for checking whether "memcg" is under move_account() or not.
1498  *
1499  * Checks whether a cgroup is mc.from, mc.to, or under the hierarchy of
1500  * the moving cgroups. This is used for waiting at high memory pressure
1501  * caused by a "move".
1502  */
1503 static bool mem_cgroup_under_move(struct mem_cgroup *memcg)
1504 {
1505 	struct mem_cgroup *from;
1506 	struct mem_cgroup *to;
1507 	bool ret = false;
1508 	/*
1509 	 * Unlike the task-move routines, we access mc.to and mc.from without
1510 	 * cgroup_mutex for mutual exclusion. Here, we take the spinlock instead.
1511 	 */
1512 	spin_lock(&mc.lock);
1513 	from = mc.from;
1514 	to = mc.to;
1515 	if (!from)
1516 		goto unlock;
1517 
1518 	ret = mem_cgroup_is_descendant(from, memcg) ||
1519 		mem_cgroup_is_descendant(to, memcg);
1520 unlock:
1521 	spin_unlock(&mc.lock);
1522 	return ret;
1523 }
1524 
1525 static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg)
1526 {
1527 	if (mc.moving_task && current != mc.moving_task) {
1528 		if (mem_cgroup_under_move(memcg)) {
1529 			DEFINE_WAIT(wait);
1530 			prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE);
1531 			/* moving charge context might have finished. */
1532 			if (mc.moving_task)
1533 				schedule();
1534 			finish_wait(&mc.waitq, &wait);
1535 			return true;
1536 		}
1537 	}
1538 	return false;
1539 }
1540 
1541 struct memory_stat {
1542 	const char *name;
1543 	unsigned int idx;
1544 };
1545 
1546 static const struct memory_stat memory_stats[] = {
1547 	{ "anon",			NR_ANON_MAPPED			},
1548 	{ "file",			NR_FILE_PAGES			},
1549 	{ "kernel",			MEMCG_KMEM			},
1550 	{ "kernel_stack",		NR_KERNEL_STACK_KB		},
1551 	{ "pagetables",			NR_PAGETABLE			},
1552 	{ "sec_pagetables",		NR_SECONDARY_PAGETABLE		},
1553 	{ "percpu",			MEMCG_PERCPU_B			},
1554 	{ "sock",			MEMCG_SOCK			},
1555 	{ "vmalloc",			MEMCG_VMALLOC			},
1556 	{ "shmem",			NR_SHMEM			},
1557 #if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
1558 	{ "zswap",			MEMCG_ZSWAP_B			},
1559 	{ "zswapped",			MEMCG_ZSWAPPED			},
1560 #endif
1561 	{ "file_mapped",		NR_FILE_MAPPED			},
1562 	{ "file_dirty",			NR_FILE_DIRTY			},
1563 	{ "file_writeback",		NR_WRITEBACK			},
1564 #ifdef CONFIG_SWAP
1565 	{ "swapcached",			NR_SWAPCACHE			},
1566 #endif
1567 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1568 	{ "anon_thp",			NR_ANON_THPS			},
1569 	{ "file_thp",			NR_FILE_THPS			},
1570 	{ "shmem_thp",			NR_SHMEM_THPS			},
1571 #endif
1572 	{ "inactive_anon",		NR_INACTIVE_ANON		},
1573 	{ "active_anon",		NR_ACTIVE_ANON			},
1574 	{ "inactive_file",		NR_INACTIVE_FILE		},
1575 	{ "active_file",		NR_ACTIVE_FILE			},
1576 	{ "unevictable",		NR_UNEVICTABLE			},
1577 	{ "slab_reclaimable",		NR_SLAB_RECLAIMABLE_B		},
1578 	{ "slab_unreclaimable",		NR_SLAB_UNRECLAIMABLE_B		},
1579 
1580 	/* The memory events */
1581 	{ "workingset_refault_anon",	WORKINGSET_REFAULT_ANON		},
1582 	{ "workingset_refault_file",	WORKINGSET_REFAULT_FILE		},
1583 	{ "workingset_activate_anon",	WORKINGSET_ACTIVATE_ANON	},
1584 	{ "workingset_activate_file",	WORKINGSET_ACTIVATE_FILE	},
1585 	{ "workingset_restore_anon",	WORKINGSET_RESTORE_ANON		},
1586 	{ "workingset_restore_file",	WORKINGSET_RESTORE_FILE		},
1587 	{ "workingset_nodereclaim",	WORKINGSET_NODERECLAIM		},
1588 };
1589 
1590 /* The actual unit of the state item, not the same as the output unit */
1591 static int memcg_page_state_unit(int item)
1592 {
1593 	switch (item) {
1594 	case MEMCG_PERCPU_B:
1595 	case MEMCG_ZSWAP_B:
1596 	case NR_SLAB_RECLAIMABLE_B:
1597 	case NR_SLAB_UNRECLAIMABLE_B:
1598 		return 1;
1599 	case NR_KERNEL_STACK_KB:
1600 		return SZ_1K;
1601 	default:
1602 		return PAGE_SIZE;
1603 	}
1604 }
1605 
1606 /* Translate stat items to the correct unit for memory.stat output */
1607 static int memcg_page_state_output_unit(int item)
1608 {
1609 	/*
1610 	 * Workingset state is actually in pages, but we export it to userspace
1611 	 * as a scalar count of events, so special case it here.
1612 	 */
1613 	switch (item) {
1614 	case WORKINGSET_REFAULT_ANON:
1615 	case WORKINGSET_REFAULT_FILE:
1616 	case WORKINGSET_ACTIVATE_ANON:
1617 	case WORKINGSET_ACTIVATE_FILE:
1618 	case WORKINGSET_RESTORE_ANON:
1619 	case WORKINGSET_RESTORE_FILE:
1620 	case WORKINGSET_NODERECLAIM:
1621 		return 1;
1622 	default:
1623 		return memcg_page_state_unit(item);
1624 	}
1625 }
1626 
1627 static inline unsigned long memcg_page_state_output(struct mem_cgroup *memcg,
1628 						    int item)
1629 {
1630 	return memcg_page_state(memcg, item) *
1631 		memcg_page_state_output_unit(item);
1632 }
1633 
1634 static inline unsigned long memcg_page_state_local_output(
1635 		struct mem_cgroup *memcg, int item)
1636 {
1637 	return memcg_page_state_local(memcg, item) *
1638 		memcg_page_state_output_unit(item);
1639 }
1640 
1641 static void memcg_stat_format(struct mem_cgroup *memcg, struct seq_buf *s)
1642 {
1643 	int i;
1644 
1645 	/*
1646 	 * Provide statistics on the state of the memory subsystem as
1647 	 * well as cumulative event counters that show past behavior.
1648 	 *
1649 	 * This list is ordered following a combination of these gradients:
1650 	 * 1) generic big picture -> specifics and details
1651 	 * 2) reflecting userspace activity -> reflecting kernel heuristics
1652 	 *
1653 	 * Current memory state:
1654 	 */
1655 	mem_cgroup_flush_stats(memcg);
1656 
1657 	for (i = 0; i < ARRAY_SIZE(memory_stats); i++) {
1658 		u64 size;
1659 
1660 		size = memcg_page_state_output(memcg, memory_stats[i].idx);
1661 		seq_buf_printf(s, "%s %llu\n", memory_stats[i].name, size);
1662 
1663 		if (unlikely(memory_stats[i].idx == NR_SLAB_UNRECLAIMABLE_B)) {
1664 			size += memcg_page_state_output(memcg,
1665 							NR_SLAB_RECLAIMABLE_B);
1666 			seq_buf_printf(s, "slab %llu\n", size);
1667 		}
1668 	}
1669 
1670 	/* Accumulated memory events */
1671 	seq_buf_printf(s, "pgscan %lu\n",
1672 		       memcg_events(memcg, PGSCAN_KSWAPD) +
1673 		       memcg_events(memcg, PGSCAN_DIRECT) +
1674 		       memcg_events(memcg, PGSCAN_KHUGEPAGED));
1675 	seq_buf_printf(s, "pgsteal %lu\n",
1676 		       memcg_events(memcg, PGSTEAL_KSWAPD) +
1677 		       memcg_events(memcg, PGSTEAL_DIRECT) +
1678 		       memcg_events(memcg, PGSTEAL_KHUGEPAGED));
1679 
1680 	for (i = 0; i < ARRAY_SIZE(memcg_vm_event_stat); i++) {
1681 		if (memcg_vm_event_stat[i] == PGPGIN ||
1682 		    memcg_vm_event_stat[i] == PGPGOUT)
1683 			continue;
1684 
1685 		seq_buf_printf(s, "%s %lu\n",
1686 			       vm_event_name(memcg_vm_event_stat[i]),
1687 			       memcg_events(memcg, memcg_vm_event_stat[i]));
1688 	}
1689 
1690 	/* The above should easily fit into one page */
1691 	WARN_ON_ONCE(seq_buf_has_overflowed(s));
1692 }
1693 
1694 static void memcg1_stat_format(struct mem_cgroup *memcg, struct seq_buf *s);
1695 
1696 static void memory_stat_format(struct mem_cgroup *memcg, struct seq_buf *s)
1697 {
1698 	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
1699 		memcg_stat_format(memcg, s);
1700 	else
1701 		memcg1_stat_format(memcg, s);
1702 	WARN_ON_ONCE(seq_buf_has_overflowed(s));
1703 }
1704 
1705 /**
1706  * mem_cgroup_print_oom_context: Print OOM information relevant to
1707  * memory controller.
1708  * @memcg: The memory cgroup that went over limit
1709  * @p: Task that is going to be killed
1710  *
1711  * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
1712  * enabled
1713  */
1714 void mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
1715 {
1716 	rcu_read_lock();
1717 
1718 	if (memcg) {
1719 		pr_cont(",oom_memcg=");
1720 		pr_cont_cgroup_path(memcg->css.cgroup);
1721 	} else
1722 		pr_cont(",global_oom");
1723 	if (p) {
1724 		pr_cont(",task_memcg=");
1725 		pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
1726 	}
1727 	rcu_read_unlock();
1728 }
1729 
1730 /**
1731  * mem_cgroup_print_oom_meminfo: Print OOM memory information relevant to
1732  * memory controller.
1733  * @memcg: The memory cgroup that went over limit
1734  */
1735 void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
1736 {
1737 	/* Use a static buffer, since the caller is holding oom_lock. */
1738 	static char buf[PAGE_SIZE];
1739 	struct seq_buf s;
1740 
1741 	lockdep_assert_held(&oom_lock);
1742 
1743 	pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n",
1744 		K((u64)page_counter_read(&memcg->memory)),
1745 		K((u64)READ_ONCE(memcg->memory.max)), memcg->memory.failcnt);
1746 	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
1747 		pr_info("swap: usage %llukB, limit %llukB, failcnt %lu\n",
1748 			K((u64)page_counter_read(&memcg->swap)),
1749 			K((u64)READ_ONCE(memcg->swap.max)), memcg->swap.failcnt);
1750 	else {
1751 		pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %lu\n",
1752 			K((u64)page_counter_read(&memcg->memsw)),
1753 			K((u64)memcg->memsw.max), memcg->memsw.failcnt);
1754 		pr_info("kmem: usage %llukB, limit %llukB, failcnt %lu\n",
1755 			K((u64)page_counter_read(&memcg->kmem)),
1756 			K((u64)memcg->kmem.max), memcg->kmem.failcnt);
1757 	}
1758 
1759 	pr_info("Memory cgroup stats for ");
1760 	pr_cont_cgroup_path(memcg->css.cgroup);
1761 	pr_cont(":");
1762 	seq_buf_init(&s, buf, sizeof(buf));
1763 	memory_stat_format(memcg, &s);
1764 	seq_buf_do_printk(&s, KERN_INFO);
1765 }
1766 
1767 /*
1768  * Return the memory (and swap, if configured) limit for a memcg.
1769  */
1770 unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
1771 {
1772 	unsigned long max = READ_ONCE(memcg->memory.max);
1773 
1774 	if (do_memsw_account()) {
1775 		if (mem_cgroup_swappiness(memcg)) {
1776 			/* Calculate swap excess capacity from memsw limit */
1777 			unsigned long swap = READ_ONCE(memcg->memsw.max) - max;
1778 
1779 			max += min(swap, (unsigned long)total_swap_pages);
1780 		}
1781 	} else {
1782 		if (mem_cgroup_swappiness(memcg))
1783 			max += min(READ_ONCE(memcg->swap.max),
1784 				   (unsigned long)total_swap_pages);
1785 	}
1786 	return max;
1787 }
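/*
 * Worked example (illustrative): under cgroup1 memsw accounting with
 * memcg->memory.max == 1G and memcg->memsw.max == 1.5G, the swap headroom is
 * 0.5G, so the returned maximum is 1G + min(0.5G, total_swap_pages) worth of
 * pages (assuming swappiness is non-zero).
 */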
1788 
1789 unsigned long mem_cgroup_size(struct mem_cgroup *memcg)
1790 {
1791 	return page_counter_read(&memcg->memory);
1792 }
1793 
1794 static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
1795 				     int order)
1796 {
1797 	struct oom_control oc = {
1798 		.zonelist = NULL,
1799 		.nodemask = NULL,
1800 		.memcg = memcg,
1801 		.gfp_mask = gfp_mask,
1802 		.order = order,
1803 	};
1804 	bool ret = true;
1805 
1806 	if (mutex_lock_killable(&oom_lock))
1807 		return true;
1808 
1809 	if (mem_cgroup_margin(memcg) >= (1 << order))
1810 		goto unlock;
1811 
1812 	/*
1813 	 * A few threads which were not waiting at mutex_lock_killable() can
1814 	 * fail to bail out. Therefore, check again after holding oom_lock.
1815 	 */
1816 	ret = task_is_dying() || out_of_memory(&oc);
1817 
1818 unlock:
1819 	mutex_unlock(&oom_lock);
1820 	return ret;
1821 }
1822 
1823 static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
1824 				   pg_data_t *pgdat,
1825 				   gfp_t gfp_mask,
1826 				   unsigned long *total_scanned)
1827 {
1828 	struct mem_cgroup *victim = NULL;
1829 	int total = 0;
1830 	int loop = 0;
1831 	unsigned long excess;
1832 	unsigned long nr_scanned;
1833 	struct mem_cgroup_reclaim_cookie reclaim = {
1834 		.pgdat = pgdat,
1835 	};
1836 
1837 	excess = soft_limit_excess(root_memcg);
1838 
1839 	while (1) {
1840 		victim = mem_cgroup_iter(root_memcg, victim, &reclaim);
1841 		if (!victim) {
1842 			loop++;
1843 			if (loop >= 2) {
1844 				/*
1845 				 * If we have not been able to reclaim
1846 				 * anything, it might be because there are
1847 				 * no reclaimable pages under this hierarchy.
1848 				 */
1849 				if (!total)
1850 					break;
1851 				/*
1852 				 * We want to do more targeted reclaim.
1853 				 * excess >> 2 is not too aggressive, so we do not
1854 				 * reclaim too much, nor too little, so we do not keep
1855 				 * coming back to reclaim from this cgroup.
1856 				 */
1857 				if (total >= (excess >> 2) ||
1858 					(loop > MEM_CGROUP_MAX_RECLAIM_LOOPS))
1859 					break;
1860 			}
1861 			continue;
1862 		}
1863 		total += mem_cgroup_shrink_node(victim, gfp_mask, false,
1864 					pgdat, &nr_scanned);
1865 		*total_scanned += nr_scanned;
1866 		if (!soft_limit_excess(root_memcg))
1867 			break;
1868 	}
1869 	mem_cgroup_iter_break(root_memcg, victim);
1870 	return total;
1871 }
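/*
 * Worked example of the loop above (hypothetical numbers): if the soft
 * limit excess is 400 pages, reclaim keeps iterating the subtree until at
 * least excess >> 2 = 100 pages have been reclaimed, the excess drops to
 * zero, nothing was reclaimed after two full passes, or more than
 * MEM_CGROUP_MAX_RECLAIM_LOOPS passes have been made.
 */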
1872 
1873 #ifdef CONFIG_LOCKDEP
1874 static struct lockdep_map memcg_oom_lock_dep_map = {
1875 	.name = "memcg_oom_lock",
1876 };
1877 #endif
1878 
1879 static DEFINE_SPINLOCK(memcg_oom_lock);
1880 
1881 /*
1882  * Check whether the OOM killer is already running under our hierarchy.
1883  * If someone else is running it, return false.
1884  */
1885 static bool mem_cgroup_oom_trylock(struct mem_cgroup *memcg)
1886 {
1887 	struct mem_cgroup *iter, *failed = NULL;
1888 
1889 	spin_lock(&memcg_oom_lock);
1890 
1891 	for_each_mem_cgroup_tree(iter, memcg) {
1892 		if (iter->oom_lock) {
1893 			/*
1894 			 * This subtree of our hierarchy is already locked,
1895 			 * so we cannot take the lock.
1896 			 */
1897 			failed = iter;
1898 			mem_cgroup_iter_break(memcg, iter);
1899 			break;
1900 		} else
1901 			iter->oom_lock = true;
1902 	}
1903 
1904 	if (failed) {
1905 		/*
1906 		 * OK, we failed to lock the whole subtree, so we have
1907 		 * to clean up the locks we set before the failing memcg.
1908 		 */
1909 		for_each_mem_cgroup_tree(iter, memcg) {
1910 			if (iter == failed) {
1911 				mem_cgroup_iter_break(memcg, iter);
1912 				break;
1913 			}
1914 			iter->oom_lock = false;
1915 		}
1916 	} else
1917 		mutex_acquire(&memcg_oom_lock_dep_map, 0, 1, _RET_IP_);
1918 
1919 	spin_unlock(&memcg_oom_lock);
1920 
1921 	return !failed;
1922 }
1923 
1924 static void mem_cgroup_oom_unlock(struct mem_cgroup *memcg)
1925 {
1926 	struct mem_cgroup *iter;
1927 
1928 	spin_lock(&memcg_oom_lock);
1929 	mutex_release(&memcg_oom_lock_dep_map, _RET_IP_);
1930 	for_each_mem_cgroup_tree(iter, memcg)
1931 		iter->oom_lock = false;
1932 	spin_unlock(&memcg_oom_lock);
1933 }
1934 
1935 static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg)
1936 {
1937 	struct mem_cgroup *iter;
1938 
1939 	spin_lock(&memcg_oom_lock);
1940 	for_each_mem_cgroup_tree(iter, memcg)
1941 		iter->under_oom++;
1942 	spin_unlock(&memcg_oom_lock);
1943 }
1944 
1945 static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg)
1946 {
1947 	struct mem_cgroup *iter;
1948 
1949 	/*
1950 	 * Be careful about under_oom underflows because a child memcg
1951 	 * could have been added after mem_cgroup_mark_under_oom.
1952 	 */
1953 	spin_lock(&memcg_oom_lock);
1954 	for_each_mem_cgroup_tree(iter, memcg)
1955 		if (iter->under_oom > 0)
1956 			iter->under_oom--;
1957 	spin_unlock(&memcg_oom_lock);
1958 }
1959 
1960 static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);
1961 
1962 struct oom_wait_info {
1963 	struct mem_cgroup *memcg;
1964 	wait_queue_entry_t	wait;
1965 };
1966 
1967 static int memcg_oom_wake_function(wait_queue_entry_t *wait,
1968 	unsigned mode, int sync, void *arg)
1969 {
1970 	struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg;
1971 	struct mem_cgroup *oom_wait_memcg;
1972 	struct oom_wait_info *oom_wait_info;
1973 
1974 	oom_wait_info = container_of(wait, struct oom_wait_info, wait);
1975 	oom_wait_memcg = oom_wait_info->memcg;
1976 
1977 	if (!mem_cgroup_is_descendant(wake_memcg, oom_wait_memcg) &&
1978 	    !mem_cgroup_is_descendant(oom_wait_memcg, wake_memcg))
1979 		return 0;
1980 	return autoremove_wake_function(wait, mode, sync, arg);
1981 }
1982 
1983 static void memcg_oom_recover(struct mem_cgroup *memcg)
1984 {
1985 	/*
1986 	 * For the following lockless ->under_oom test, the only required
1987 	 * guarantee is that it must see the state asserted by an OOM when
1988 	 * this function is called as a result of userland actions
1989 	 * triggered by the notification of the OOM.  This is trivially
1990 	 * achieved by invoking mem_cgroup_mark_under_oom() before
1991 	 * triggering notification.
1992 	 */
1993 	if (memcg && memcg->under_oom)
1994 		__wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg);
1995 }
1996 
1997 /*
1998  * Returns true if successfully killed one or more processes. Though in some
1999  * corner cases it can return true even without killing any process.
2000  */
2001 static bool mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
2002 {
2003 	bool locked, ret;
2004 
2005 	if (order > PAGE_ALLOC_COSTLY_ORDER)
2006 		return false;
2007 
2008 	memcg_memory_event(memcg, MEMCG_OOM);
2009 
2010 	/*
2011 	 * We are in the middle of the charge context here, so we
2012 	 * don't want to block when potentially sitting on a callstack
2013 	 * that holds all kinds of filesystem and mm locks.
2014 	 *
2015 	 * cgroup1 allows disabling the OOM killer and waiting for outside
2016 	 * handling until the charge can succeed; remember the context and put
2017 	 * the task to sleep at the end of the page fault when all locks are
2018 	 * released.
2019 	 *
2020 	 * On the other hand, in-kernel OOM killer allows for an async victim
2021 	 * memory reclaim (oom_reaper) and that means that we are not solely
2022 	 * relying on the oom victim to make forward progress and we can
2023 	 * invoke the oom killer here.
2024 	 *
2025 	 * Please note that mem_cgroup_out_of_memory might fail to find a
2026 	 * victim and then we have to bail out from the charge path.
2027 	 */
2028 	if (READ_ONCE(memcg->oom_kill_disable)) {
2029 		if (current->in_user_fault) {
2030 			css_get(&memcg->css);
2031 			current->memcg_in_oom = memcg;
2032 			current->memcg_oom_gfp_mask = mask;
2033 			current->memcg_oom_order = order;
2034 		}
2035 		return false;
2036 	}
2037 
2038 	mem_cgroup_mark_under_oom(memcg);
2039 
2040 	locked = mem_cgroup_oom_trylock(memcg);
2041 
2042 	if (locked)
2043 		mem_cgroup_oom_notify(memcg);
2044 
2045 	mem_cgroup_unmark_under_oom(memcg);
2046 	ret = mem_cgroup_out_of_memory(memcg, mask, order);
2047 
2048 	if (locked)
2049 		mem_cgroup_oom_unlock(memcg);
2050 
2051 	return ret;
2052 }
2053 
2054 /**
2055  * mem_cgroup_oom_synchronize - complete memcg OOM handling
2056  * @handle: actually kill/wait or just clean up the OOM state
2057  *
2058  * This has to be called at the end of a page fault if the memcg OOM
2059  * handler was enabled.
2060  *
2061  * Memcg supports userspace OOM handling where failed allocations must
2062  * sleep on a waitqueue until the userspace task resolves the
2063  * of locks held is not a good idea; instead we remember an OOM state
2064  * of locks held is not a good idea, instead we remember an OOM state
2065  * in the task and mem_cgroup_oom_synchronize() has to be called at
2066  * the end of the page fault to complete the OOM handling.
2067  *
2068  * Returns %true if an ongoing memcg OOM situation was detected and
2069  * completed, %false otherwise.
2070  */
2071 bool mem_cgroup_oom_synchronize(bool handle)
2072 {
2073 	struct mem_cgroup *memcg = current->memcg_in_oom;
2074 	struct oom_wait_info owait;
2075 	bool locked;
2076 
2077 	/* OOM is global, do not handle */
2078 	if (!memcg)
2079 		return false;
2080 
2081 	if (!handle)
2082 		goto cleanup;
2083 
2084 	owait.memcg = memcg;
2085 	owait.wait.flags = 0;
2086 	owait.wait.func = memcg_oom_wake_function;
2087 	owait.wait.private = current;
2088 	INIT_LIST_HEAD(&owait.wait.entry);
2089 
2090 	prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
2091 	mem_cgroup_mark_under_oom(memcg);
2092 
2093 	locked = mem_cgroup_oom_trylock(memcg);
2094 
2095 	if (locked)
2096 		mem_cgroup_oom_notify(memcg);
2097 
2098 	schedule();
2099 	mem_cgroup_unmark_under_oom(memcg);
2100 	finish_wait(&memcg_oom_waitq, &owait.wait);
2101 
2102 	if (locked)
2103 		mem_cgroup_oom_unlock(memcg);
2104 cleanup:
2105 	current->memcg_in_oom = NULL;
2106 	css_put(&memcg->css);
2107 	return true;
2108 }
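/*
 * Minimal sketch of the intended call pattern (illustrative only; the real
 * caller is the page fault exit path, outside this file):
 *
 *	if (page_fault_failed_with_oom)
 *		mem_cgroup_oom_synchronize(true);
 *
 * When the cgroup1 OOM killer is disabled, mem_cgroup_oom() above only
 * records the OOM state in the task; the actual waiting happens here,
 * after all locks have been released.
 */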
2109 
2110 /**
2111  * mem_cgroup_get_oom_group - get a memory cgroup to clean up after OOM
2112  * @victim: task to be killed by the OOM killer
2113  * @oom_domain: memcg in case of memcg OOM, NULL in case of system-wide OOM
2114  *
2115  * Returns a pointer to a memory cgroup, which has to be cleaned up
2116  * by killing all belonging OOM-killable tasks.
2117  *
2118  * Caller has to call mem_cgroup_put() on the returned non-NULL memcg.
2119  */
2120 struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim,
2121 					    struct mem_cgroup *oom_domain)
2122 {
2123 	struct mem_cgroup *oom_group = NULL;
2124 	struct mem_cgroup *memcg;
2125 
2126 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
2127 		return NULL;
2128 
2129 	if (!oom_domain)
2130 		oom_domain = root_mem_cgroup;
2131 
2132 	rcu_read_lock();
2133 
2134 	memcg = mem_cgroup_from_task(victim);
2135 	if (mem_cgroup_is_root(memcg))
2136 		goto out;
2137 
2138 	/*
2139 	 * If the victim task has been asynchronously moved to a different
2140 	 * memory cgroup, we might end up killing tasks outside oom_domain.
2141 	 * In this case it's better to ignore memory.group.oom.
2142 	 */
2143 	if (unlikely(!mem_cgroup_is_descendant(memcg, oom_domain)))
2144 		goto out;
2145 
2146 	/*
2147 	 * Traverse the memory cgroup hierarchy from the victim task's
2148 	 * cgroup up to the OOMing cgroup (or root) to find the
2149 	 * highest-level memory cgroup with oom.group set.
2150 	 */
2151 	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
2152 		if (READ_ONCE(memcg->oom_group))
2153 			oom_group = memcg;
2154 
2155 		if (memcg == oom_domain)
2156 			break;
2157 	}
2158 
2159 	if (oom_group)
2160 		css_get(&oom_group->css);
2161 out:
2162 	rcu_read_unlock();
2163 
2164 	return oom_group;
2165 }
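/*
 * Usage sketch for the helper above (illustrative only; the real caller is
 * the OOM killer):
 *
 *	oom_group = mem_cgroup_get_oom_group(victim, oc->memcg);
 *	if (oom_group) {
 *		... kill every OOM-killable task in oom_group ...
 *		mem_cgroup_put(oom_group);
 *	}
 */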
2166 
2167 void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
2168 {
2169 	pr_info("Tasks in ");
2170 	pr_cont_cgroup_path(memcg->css.cgroup);
2171 	pr_cont(" are going to be killed due to memory.oom.group set\n");
2172 }
2173 
2174 /**
2175  * folio_memcg_lock - Bind a folio to its memcg.
2176  * @folio: The folio.
2177  *
2178  * This function prevents unlocked LRU folios from being moved to
2179  * another cgroup.
2180  *
2181  * It ensures lifetime of the bound memcg.  The caller is responsible
2182  * for the lifetime of the folio.
2183  */
2184 void folio_memcg_lock(struct folio *folio)
2185 {
2186 	struct mem_cgroup *memcg;
2187 	unsigned long flags;
2188 
2189 	/*
2190 	 * The RCU lock is held throughout the transaction.  The fast
2191 	 * path can get away without acquiring the memcg->move_lock
2192 	 * because page moving starts with an RCU grace period.
2193 	 */
2194 	rcu_read_lock();
2195 
2196 	if (mem_cgroup_disabled())
2197 		return;
2198 again:
2199 	memcg = folio_memcg(folio);
2200 	if (unlikely(!memcg))
2201 		return;
2202 
2203 #ifdef CONFIG_PROVE_LOCKING
2204 	local_irq_save(flags);
2205 	might_lock(&memcg->move_lock);
2206 	local_irq_restore(flags);
2207 #endif
2208 
2209 	if (atomic_read(&memcg->moving_account) <= 0)
2210 		return;
2211 
2212 	spin_lock_irqsave(&memcg->move_lock, flags);
2213 	if (memcg != folio_memcg(folio)) {
2214 		spin_unlock_irqrestore(&memcg->move_lock, flags);
2215 		goto again;
2216 	}
2217 
2218 	/*
2219 	 * When charge migration first begins, we can have multiple
2220 	 * critical sections holding the fast-path RCU lock and one
2221 	 * holding the slowpath move_lock. Track the task that holds the
2222 	 * move_lock for folio_memcg_unlock().
2223 	 */
2224 	memcg->move_lock_task = current;
2225 	memcg->move_lock_flags = flags;
2226 }
2227 
2228 static void __folio_memcg_unlock(struct mem_cgroup *memcg)
2229 {
2230 	if (memcg && memcg->move_lock_task == current) {
2231 		unsigned long flags = memcg->move_lock_flags;
2232 
2233 		memcg->move_lock_task = NULL;
2234 		memcg->move_lock_flags = 0;
2235 
2236 		spin_unlock_irqrestore(&memcg->move_lock, flags);
2237 	}
2238 
2239 	rcu_read_unlock();
2240 }
2241 
2242 /**
2243  * folio_memcg_unlock - Release the binding between a folio and its memcg.
2244  * @folio: The folio.
2245  *
2246  * This releases the binding created by folio_memcg_lock().  This does
2247  * not change the accounting of this folio to its memcg, but it does
2248  * permit others to change it.
2249  */
2250 void folio_memcg_unlock(struct folio *folio)
2251 {
2252 	__folio_memcg_unlock(folio_memcg(folio));
2253 }
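/*
 * Typical usage sketch of the pair above (illustrative only): a caller that
 * must keep the folio's memcg binding stable does
 *
 *	folio_memcg_lock(folio);
 *	memcg = folio_memcg(folio);
 *	... update per-memcg state for the folio ...
 *	folio_memcg_unlock(folio);
 *
 * The caller still has to keep the folio itself alive, e.g. via a page
 * reference or the folio lock.
 */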
2254 
2255 struct memcg_stock_pcp {
2256 	local_lock_t stock_lock;
2257 	struct mem_cgroup *cached; /* this can never be the root cgroup */
2258 	unsigned int nr_pages;
2259 
2260 #ifdef CONFIG_MEMCG_KMEM
2261 	struct obj_cgroup *cached_objcg;
2262 	struct pglist_data *cached_pgdat;
2263 	unsigned int nr_bytes;
2264 	int nr_slab_reclaimable_b;
2265 	int nr_slab_unreclaimable_b;
2266 #endif
2267 
2268 	struct work_struct work;
2269 	unsigned long flags;
2270 #define FLUSHING_CACHED_CHARGE	0
2271 };
2272 static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock) = {
2273 	.stock_lock = INIT_LOCAL_LOCK(stock_lock),
2274 };
2275 static DEFINE_MUTEX(percpu_charge_mutex);
2276 
2277 #ifdef CONFIG_MEMCG_KMEM
2278 static struct obj_cgroup *drain_obj_stock(struct memcg_stock_pcp *stock);
2279 static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
2280 				     struct mem_cgroup *root_memcg);
2281 static void memcg_account_kmem(struct mem_cgroup *memcg, int nr_pages);
2282 
2283 #else
2284 static inline struct obj_cgroup *drain_obj_stock(struct memcg_stock_pcp *stock)
2285 {
2286 	return NULL;
2287 }
2288 static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
2289 				     struct mem_cgroup *root_memcg)
2290 {
2291 	return false;
2292 }
2293 static void memcg_account_kmem(struct mem_cgroup *memcg, int nr_pages)
2294 {
2295 }
2296 #endif
2297 
2298 /**
2299  * consume_stock: Try to consume stocked charge on this cpu.
2300  * @memcg: memcg to consume from.
2301  * @nr_pages: how many pages to charge.
2302  *
2303  * The charge will only be consumed if @memcg matches the current cpu's memcg
2304  * stock, and at least @nr_pages are available in that stock.  On failure the
2305  * caller falls back to the slow path, which refills the stock.
2306  *
2307  * returns true if successful, false otherwise.
2308  */
2309 static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
2310 {
2311 	struct memcg_stock_pcp *stock;
2312 	unsigned long flags;
2313 	bool ret = false;
2314 
2315 	if (nr_pages > MEMCG_CHARGE_BATCH)
2316 		return ret;
2317 
2318 	local_lock_irqsave(&memcg_stock.stock_lock, flags);
2319 
2320 	stock = this_cpu_ptr(&memcg_stock);
2321 	if (memcg == READ_ONCE(stock->cached) && stock->nr_pages >= nr_pages) {
2322 		stock->nr_pages -= nr_pages;
2323 		ret = true;
2324 	}
2325 
2326 	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
2327 
2328 	return ret;
2329 }
2330 
2331 /*
2332  * Return the charges cached in the percpu stock to the counters and reset it.
2333  */
2334 static void drain_stock(struct memcg_stock_pcp *stock)
2335 {
2336 	struct mem_cgroup *old = READ_ONCE(stock->cached);
2337 
2338 	if (!old)
2339 		return;
2340 
2341 	if (stock->nr_pages) {
2342 		page_counter_uncharge(&old->memory, stock->nr_pages);
2343 		if (do_memsw_account())
2344 			page_counter_uncharge(&old->memsw, stock->nr_pages);
2345 		stock->nr_pages = 0;
2346 	}
2347 
2348 	css_put(&old->css);
2349 	WRITE_ONCE(stock->cached, NULL);
2350 }
2351 
2352 static void drain_local_stock(struct work_struct *dummy)
2353 {
2354 	struct memcg_stock_pcp *stock;
2355 	struct obj_cgroup *old = NULL;
2356 	unsigned long flags;
2357 
2358 	/*
2359 	 * The only protection from cpu hotplug (memcg_hotplug_cpu_dead) vs.
2360 	 * drain_stock races is that we always operate on local CPU stock
2361 	 * here with IRQ disabled
2362 	 */
2363 	local_lock_irqsave(&memcg_stock.stock_lock, flags);
2364 
2365 	stock = this_cpu_ptr(&memcg_stock);
2366 	old = drain_obj_stock(stock);
2367 	drain_stock(stock);
2368 	clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
2369 
2370 	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
2371 	if (old)
2372 		obj_cgroup_put(old);
2373 }
2374 
2375 /*
2376  * Cache charges (nr_pages) in the local per-CPU area.
2377  * They will be consumed by consume_stock() later.
2378  */
2379 static void __refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
2380 {
2381 	struct memcg_stock_pcp *stock;
2382 
2383 	stock = this_cpu_ptr(&memcg_stock);
2384 	if (READ_ONCE(stock->cached) != memcg) { /* reset if necessary */
2385 		drain_stock(stock);
2386 		css_get(&memcg->css);
2387 		WRITE_ONCE(stock->cached, memcg);
2388 	}
2389 	stock->nr_pages += nr_pages;
2390 
2391 	if (stock->nr_pages > MEMCG_CHARGE_BATCH)
2392 		drain_stock(stock);
2393 }
2394 
2395 static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
2396 {
2397 	unsigned long flags;
2398 
2399 	local_lock_irqsave(&memcg_stock.stock_lock, flags);
2400 	__refill_stock(memcg, nr_pages);
2401 	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
2402 }
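/*
 * Worked example of the stocking scheme (hypothetical numbers, assuming a
 * batch of MEMCG_CHARGE_BATCH == 64 pages): a single-page charge that
 * misses the stock charges the full batch to the page counters in
 * try_charge_memcg() and then calls refill_stock() with the 63 leftover
 * pages, so the next 63 single-page charges on this CPU are served by
 * consume_stock() without touching the shared counters.
 */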
2403 
2404 /*
2405  * Drain all per-CPU charge caches for the given root_memcg and the
2406  * whole subtree of the hierarchy under it.
2407  */
2408 static void drain_all_stock(struct mem_cgroup *root_memcg)
2409 {
2410 	int cpu, curcpu;
2411 
2412 	/* If someone's already draining, avoid adding more workers. */
2413 	if (!mutex_trylock(&percpu_charge_mutex))
2414 		return;
2415 	/*
2416 	 * Notify other cpus that a system-wide "drain" is running.
2417 	 * We do not care about races with the cpu hotplug because cpu down
2418 	 * as well as workers from this path always operate on the local
2419 	 * per-cpu data. CPU up doesn't touch memcg_stock at all.
2420 	 */
2421 	migrate_disable();
2422 	curcpu = smp_processor_id();
2423 	for_each_online_cpu(cpu) {
2424 		struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
2425 		struct mem_cgroup *memcg;
2426 		bool flush = false;
2427 
2428 		rcu_read_lock();
2429 		memcg = READ_ONCE(stock->cached);
2430 		if (memcg && stock->nr_pages &&
2431 		    mem_cgroup_is_descendant(memcg, root_memcg))
2432 			flush = true;
2433 		else if (obj_stock_flush_required(stock, root_memcg))
2434 			flush = true;
2435 		rcu_read_unlock();
2436 
2437 		if (flush &&
2438 		    !test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
2439 			if (cpu == curcpu)
2440 				drain_local_stock(&stock->work);
2441 			else if (!cpu_is_isolated(cpu))
2442 				schedule_work_on(cpu, &stock->work);
2443 		}
2444 	}
2445 	migrate_enable();
2446 	mutex_unlock(&percpu_charge_mutex);
2447 }
2448 
2449 static int memcg_hotplug_cpu_dead(unsigned int cpu)
2450 {
2451 	struct memcg_stock_pcp *stock;
2452 
2453 	stock = &per_cpu(memcg_stock, cpu);
2454 	drain_stock(stock);
2455 
2456 	return 0;
2457 }
2458 
2459 static unsigned long reclaim_high(struct mem_cgroup *memcg,
2460 				  unsigned int nr_pages,
2461 				  gfp_t gfp_mask)
2462 {
2463 	unsigned long nr_reclaimed = 0;
2464 
2465 	do {
2466 		unsigned long pflags;
2467 
2468 		if (page_counter_read(&memcg->memory) <=
2469 		    READ_ONCE(memcg->memory.high))
2470 			continue;
2471 
2472 		memcg_memory_event(memcg, MEMCG_HIGH);
2473 
2474 		psi_memstall_enter(&pflags);
2475 		nr_reclaimed += try_to_free_mem_cgroup_pages(memcg, nr_pages,
2476 							gfp_mask,
2477 							MEMCG_RECLAIM_MAY_SWAP);
2478 		psi_memstall_leave(&pflags);
2479 	} while ((memcg = parent_mem_cgroup(memcg)) &&
2480 		 !mem_cgroup_is_root(memcg));
2481 
2482 	return nr_reclaimed;
2483 }
2484 
2485 static void high_work_func(struct work_struct *work)
2486 {
2487 	struct mem_cgroup *memcg;
2488 
2489 	memcg = container_of(work, struct mem_cgroup, high_work);
2490 	reclaim_high(memcg, MEMCG_CHARGE_BATCH, GFP_KERNEL);
2491 }
2492 
2493 /*
2494  * Clamp the maximum sleep time per allocation batch to 2 seconds. This is
2495  * enough to still cause a significant slowdown in most cases, while still
2496  * allowing diagnostics and tracing to proceed without becoming stuck.
2497  */
2498 #define MEMCG_MAX_HIGH_DELAY_JIFFIES (2UL*HZ)
2499 
2500 /*
2501  * When calculating the delay, we use these either side of the exponentiation to
2502  * maintain precision and scale to a reasonable number of jiffies (see the table
2503  * below).
2504  *
2505  * - MEMCG_DELAY_PRECISION_SHIFT: Extra precision bits while translating the
2506  *   overage ratio to a delay.
2507  * - MEMCG_DELAY_SCALING_SHIFT: The number of bits to scale down the
2508  *   proposed penalty in order to reduce to a reasonable number of jiffies, and
2509  *   to produce a reasonable delay curve.
2510  *
2511  * MEMCG_DELAY_SCALING_SHIFT just happens to be a number that produces a
2512  * reasonable delay curve compared to precision-adjusted overage, not
2513  * penalising heavily at first, but still making sure that growth beyond the
2514  * limit penalises misbehaving cgroups by slowing them down exponentially. For
2515  * example, with a high of 100 megabytes:
2516  *
2517  *  +-------+------------------------+
2518  *  | usage | time to allocate in ms |
2519  *  +-------+------------------------+
2520  *  | 100M  |                      0 |
2521  *  | 101M  |                      6 |
2522  *  | 102M  |                     25 |
2523  *  | 103M  |                     57 |
2524  *  | 104M  |                    102 |
2525  *  | 105M  |                    159 |
2526  *  | 106M  |                    230 |
2527  *  | 107M  |                    313 |
2528  *  | 108M  |                    409 |
2529  *  | 109M  |                    518 |
2530  *  | 110M  |                    639 |
2531  *  | 111M  |                    774 |
2532  *  | 112M  |                    921 |
2533  *  | 113M  |                   1081 |
2534  *  | 114M  |                   1254 |
2535  *  | 115M  |                   1439 |
2536  *  | 116M  |                   1638 |
2537  *  | 117M  |                   1849 |
2538  *  | 118M  |                   2000 |
2539  *  | 119M  |                   2000 |
2540  *  | 120M  |                   2000 |
2541  *  +-------+------------------------+
2542  */
2543 #define MEMCG_DELAY_PRECISION_SHIFT 20
2544 #define MEMCG_DELAY_SCALING_SHIFT 14
2545 
2546 static u64 calculate_overage(unsigned long usage, unsigned long high)
2547 {
2548 	u64 overage;
2549 
2550 	if (usage <= high)
2551 		return 0;
2552 
2553 	/*
2554 	 * Prevent division by 0 in overage calculation by acting as if
2555 	 * it was a threshold of 1 page
2556 	 */
2557 	high = max(high, 1UL);
2558 
2559 	overage = usage - high;
2560 	overage <<= MEMCG_DELAY_PRECISION_SHIFT;
2561 	return div64_u64(overage, high);
2562 }
2563 
2564 static u64 mem_find_max_overage(struct mem_cgroup *memcg)
2565 {
2566 	u64 overage, max_overage = 0;
2567 
2568 	do {
2569 		overage = calculate_overage(page_counter_read(&memcg->memory),
2570 					    READ_ONCE(memcg->memory.high));
2571 		max_overage = max(overage, max_overage);
2572 	} while ((memcg = parent_mem_cgroup(memcg)) &&
2573 		 !mem_cgroup_is_root(memcg));
2574 
2575 	return max_overage;
2576 }
2577 
2578 static u64 swap_find_max_overage(struct mem_cgroup *memcg)
2579 {
2580 	u64 overage, max_overage = 0;
2581 
2582 	do {
2583 		overage = calculate_overage(page_counter_read(&memcg->swap),
2584 					    READ_ONCE(memcg->swap.high));
2585 		if (overage)
2586 			memcg_memory_event(memcg, MEMCG_SWAP_HIGH);
2587 		max_overage = max(overage, max_overage);
2588 	} while ((memcg = parent_mem_cgroup(memcg)) &&
2589 		 !mem_cgroup_is_root(memcg));
2590 
2591 	return max_overage;
2592 }
2593 
2594 /*
2595  * Get the number of jiffies that we should penalise a mischievous cgroup which
2596  * is exceeding its memory.high by checking both it and its ancestors.
2597  */
2598 static unsigned long calculate_high_delay(struct mem_cgroup *memcg,
2599 					  unsigned int nr_pages,
2600 					  u64 max_overage)
2601 {
2602 	unsigned long penalty_jiffies;
2603 
2604 	if (!max_overage)
2605 		return 0;
2606 
2607 	/*
2608 	 * We use overage compared to memory.high to calculate the number of
2609 	 * jiffies to sleep (penalty_jiffies). Ideally this value should be
2610 	 * fairly lenient on small overages, and increasingly harsh when the
2611 	 * memcg in question makes it clear that it has no intention of stopping
2612 	 * its crazy behaviour, so we exponentially increase the delay based on
2613 	 * overage amount.
2614 	 */
2615 	penalty_jiffies = max_overage * max_overage * HZ;
2616 	penalty_jiffies >>= MEMCG_DELAY_PRECISION_SHIFT;
2617 	penalty_jiffies >>= MEMCG_DELAY_SCALING_SHIFT;
2618 
2619 	/*
2620 	 * Factor in the task's own contribution to the overage, such that four
2621 	 * N-sized allocations are throttled approximately the same as one
2622 	 * 4N-sized allocation.
2623 	 *
2624 	 * MEMCG_CHARGE_BATCH pages is nominal, so work out how much smaller or
2625 	 * larger the current charge batch is than that.
2626 	 */
2627 	return penalty_jiffies * nr_pages / MEMCG_CHARGE_BATCH;
2628 }
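/*
 * Worked example of the two helpers above, matching the table near
 * MEMCG_DELAY_SCALING_SHIFT (assuming HZ == 1000 and a charge of
 * MEMCG_CHARGE_BATCH pages): with memory.high = 100M and usage = 110M,
 *
 *	overage = (10M << 20) / 100M                ~= 104857
 *	penalty = 104857 * 104857 * HZ >> 20 >> 14  ~= 639 jiffies
 *
 * which is the ~639ms the table lists for 110M of usage.
 */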
2629 
2630 /*
2631  * Reclaims memory over the high limit. Called directly from
2632  * try_charge() (context permitting), as well as from the userland
2633  * return path where reclaim is always able to block.
2634  */
2635 void mem_cgroup_handle_over_high(gfp_t gfp_mask)
2636 {
2637 	unsigned long penalty_jiffies;
2638 	unsigned long pflags;
2639 	unsigned long nr_reclaimed;
2640 	unsigned int nr_pages = current->memcg_nr_pages_over_high;
2641 	int nr_retries = MAX_RECLAIM_RETRIES;
2642 	struct mem_cgroup *memcg;
2643 	bool in_retry = false;
2644 
2645 	if (likely(!nr_pages))
2646 		return;
2647 
2648 	memcg = get_mem_cgroup_from_mm(current->mm);
2649 	current->memcg_nr_pages_over_high = 0;
2650 
2651 retry_reclaim:
2652 	/*
2653 	 * Bail if the task is already exiting. Unlike memory.max,
2654 	 * memory.high enforcement isn't as strict, and there is no
2655 	 * OOM killer involved, which means the excess could already
2656 	 * be much bigger (and still growing) than it could for
2657 	 * memory.max; the dying task could get stuck in fruitless
2658 	 * reclaim for a long time, which isn't desirable.
2659 	 */
2660 	if (task_is_dying())
2661 		goto out;
2662 
2663 	/*
2664 	 * The allocating task should reclaim at least the batch size, but for
2665 	 * subsequent retries we only want to do what's necessary to prevent oom
2666 	 * or breaching resource isolation.
2667 	 *
2668 	 * This is distinct from memory.max or page allocator behaviour because
2669 	 * memory.high is currently batched, whereas memory.max and the page
2670 	 * allocator run every time an allocation is made.
2671 	 */
2672 	nr_reclaimed = reclaim_high(memcg,
2673 				    in_retry ? SWAP_CLUSTER_MAX : nr_pages,
2674 				    gfp_mask);
2675 
2676 	/*
2677 	 * memory.high is breached and reclaim is unable to keep up. Throttle
2678 	 * allocators proactively to slow down excessive growth.
2679 	 */
2680 	penalty_jiffies = calculate_high_delay(memcg, nr_pages,
2681 					       mem_find_max_overage(memcg));
2682 
2683 	penalty_jiffies += calculate_high_delay(memcg, nr_pages,
2684 						swap_find_max_overage(memcg));
2685 
2686 	/*
2687 	 * Clamp the max delay per usermode return so as to still keep the
2688 	 * application moving forwards and also permit diagnostics, albeit
2689 	 * extremely slowly.
2690 	 */
2691 	penalty_jiffies = min(penalty_jiffies, MEMCG_MAX_HIGH_DELAY_JIFFIES);
2692 
2693 	/*
2694 	 * Don't sleep if the amount of jiffies this memcg owes us is so low
2695 	 * that it's not even worth doing, in an attempt to be nice to those who
2696 	 * go only a small amount over their memory.high value and maybe haven't
2697 	 * been aggressively reclaimed enough yet.
2698 	 */
2699 	if (penalty_jiffies <= HZ / 100)
2700 		goto out;
2701 
2702 	/*
2703 	 * If reclaim is making forward progress but we're still over
2704 	 * memory.high, we want to encourage that rather than doing allocator
2705 	 * throttling.
2706 	 */
2707 	if (nr_reclaimed || nr_retries--) {
2708 		in_retry = true;
2709 		goto retry_reclaim;
2710 	}
2711 
2712 	/*
2713 	 * Reclaim didn't manage to push usage below the limit, slow
2714 	 * this allocating task down.
2715 	 *
2716 	 * If we exit early, we're guaranteed to die (since
2717 	 * schedule_timeout_killable sets TASK_KILLABLE). This means we don't
2718 	 * need to account for any ill-begotten jiffies to pay them off later.
2719 	 */
2720 	psi_memstall_enter(&pflags);
2721 	schedule_timeout_killable(penalty_jiffies);
2722 	psi_memstall_leave(&pflags);
2723 
2724 out:
2725 	css_put(&memcg->css);
2726 }
2727 
2728 static int try_charge_memcg(struct mem_cgroup *memcg, gfp_t gfp_mask,
2729 			unsigned int nr_pages)
2730 {
2731 	unsigned int batch = max(MEMCG_CHARGE_BATCH, nr_pages);
2732 	int nr_retries = MAX_RECLAIM_RETRIES;
2733 	struct mem_cgroup *mem_over_limit;
2734 	struct page_counter *counter;
2735 	unsigned long nr_reclaimed;
2736 	bool passed_oom = false;
2737 	unsigned int reclaim_options = MEMCG_RECLAIM_MAY_SWAP;
2738 	bool drained = false;
2739 	bool raised_max_event = false;
2740 	unsigned long pflags;
2741 
2742 retry:
2743 	if (consume_stock(memcg, nr_pages))
2744 		return 0;
2745 
2746 	if (!do_memsw_account() ||
2747 	    page_counter_try_charge(&memcg->memsw, batch, &counter)) {
2748 		if (page_counter_try_charge(&memcg->memory, batch, &counter))
2749 			goto done_restock;
2750 		if (do_memsw_account())
2751 			page_counter_uncharge(&memcg->memsw, batch);
2752 		mem_over_limit = mem_cgroup_from_counter(counter, memory);
2753 	} else {
2754 		mem_over_limit = mem_cgroup_from_counter(counter, memsw);
2755 		reclaim_options &= ~MEMCG_RECLAIM_MAY_SWAP;
2756 	}
2757 
2758 	if (batch > nr_pages) {
2759 		batch = nr_pages;
2760 		goto retry;
2761 	}
2762 
2763 	/*
2764 	 * Prevent unbounded recursion when reclaim operations need to
2765 	 * allocate memory. This might exceed the limits temporarily,
2766 	 * but we prefer facilitating memory reclaim and getting back
2767 	 * under the limit over triggering OOM kills in these cases.
2768 	 */
2769 	if (unlikely(current->flags & PF_MEMALLOC))
2770 		goto force;
2771 
2772 	if (unlikely(task_in_memcg_oom(current)))
2773 		goto nomem;
2774 
2775 	if (!gfpflags_allow_blocking(gfp_mask))
2776 		goto nomem;
2777 
2778 	memcg_memory_event(mem_over_limit, MEMCG_MAX);
2779 	raised_max_event = true;
2780 
2781 	psi_memstall_enter(&pflags);
2782 	nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages,
2783 						    gfp_mask, reclaim_options);
2784 	psi_memstall_leave(&pflags);
2785 
2786 	if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
2787 		goto retry;
2788 
2789 	if (!drained) {
2790 		drain_all_stock(mem_over_limit);
2791 		drained = true;
2792 		goto retry;
2793 	}
2794 
2795 	if (gfp_mask & __GFP_NORETRY)
2796 		goto nomem;
2797 	/*
2798 	 * Even though the limit is exceeded at this point, reclaim
2799 	 * may have been able to free some pages.  Retry the charge
2800 	 * before killing the task.
2801 	 *
2802 	 * Only for regular pages, though: huge pages are rather
2803 	 * unlikely to succeed so close to the limit, and we fall back
2804 	 * to regular pages anyway in case of failure.
2805 	 */
2806 	if (nr_reclaimed && nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER))
2807 		goto retry;
2808 	/*
2809 	 * During a task move, charges can be accounted twice. So it's
2810 	 * better to wait until the end of the move if one is in progress.
2811 	 */
2812 	if (mem_cgroup_wait_acct_move(mem_over_limit))
2813 		goto retry;
2814 
2815 	if (nr_retries--)
2816 		goto retry;
2817 
2818 	if (gfp_mask & __GFP_RETRY_MAYFAIL)
2819 		goto nomem;
2820 
2821 	/* Avoid endless loop for tasks bypassed by the oom killer */
2822 	if (passed_oom && task_is_dying())
2823 		goto nomem;
2824 
2825 	/*
2826 	 * Keep retrying as long as the memcg oom killer is able to make
2827 	 * forward progress, or bypass the charge if the oom killer
2828 	 * couldn't make any progress.
2829 	 */
2830 	if (mem_cgroup_oom(mem_over_limit, gfp_mask,
2831 			   get_order(nr_pages * PAGE_SIZE))) {
2832 		passed_oom = true;
2833 		nr_retries = MAX_RECLAIM_RETRIES;
2834 		goto retry;
2835 	}
2836 nomem:
2837 	/*
2838 	 * Memcg doesn't have a dedicated reserve for atomic
2839 	 * allocations. But like the global atomic pool, we need to
2840 	 * put the burden of reclaim on regular allocation requests
2841 	 * and let these go through as privileged allocations.
2842 	 */
2843 	if (!(gfp_mask & (__GFP_NOFAIL | __GFP_HIGH)))
2844 		return -ENOMEM;
2845 force:
2846 	/*
2847 	 * If the allocation has to be enforced, don't forget to raise
2848 	 * a MEMCG_MAX event.
2849 	 */
2850 	if (!raised_max_event)
2851 		memcg_memory_event(mem_over_limit, MEMCG_MAX);
2852 
2853 	/*
2854 	 * The allocation either can't fail or will lead to more memory
2855 	 * being freed very soon.  Allow memory usage to go over the limit
2856 	 * temporarily by force charging it.
2857 	 */
2858 	page_counter_charge(&memcg->memory, nr_pages);
2859 	if (do_memsw_account())
2860 		page_counter_charge(&memcg->memsw, nr_pages);
2861 
2862 	return 0;
2863 
2864 done_restock:
2865 	if (batch > nr_pages)
2866 		refill_stock(memcg, batch - nr_pages);
2867 
2868 	/*
2869 	 * If the hierarchy is above the normal consumption range, schedule
2870 	 * reclaim on returning to userland.  We can perform reclaim here
2871 	 * if __GFP_RECLAIM but let's always punt for simplicity and so that
2872 	 * GFP_KERNEL can consistently be used during reclaim.  @memcg is
2873 	 * not recorded as it most likely matches current's and won't
2874 	 * change in the meantime.  As high limit is checked again before
2875 	 * reclaim, the cost of mismatch is negligible.
2876 	 */
2877 	do {
2878 		bool mem_high, swap_high;
2879 
2880 		mem_high = page_counter_read(&memcg->memory) >
2881 			READ_ONCE(memcg->memory.high);
2882 		swap_high = page_counter_read(&memcg->swap) >
2883 			READ_ONCE(memcg->swap.high);
2884 
2885 		/* Don't bother a random interrupted task */
2886 		if (!in_task()) {
2887 			if (mem_high) {
2888 				schedule_work(&memcg->high_work);
2889 				break;
2890 			}
2891 			continue;
2892 		}
2893 
2894 		if (mem_high || swap_high) {
2895 			/*
2896 			 * The allocating tasks in this cgroup will need to do
2897 			 * reclaim or be throttled to prevent further growth
2898 			 * of the memory or swap footprints.
2899 			 *
2900 			 * Target some best-effort fairness between the tasks,
2901 			 * and distribute reclaim work and delay penalties
2902 			 * based on how much each task is actually allocating.
2903 			 */
2904 			current->memcg_nr_pages_over_high += batch;
2905 			set_notify_resume(current);
2906 			break;
2907 		}
2908 	} while ((memcg = parent_mem_cgroup(memcg)));
2909 
2910 	/*
2911 	 * Reclaim is set up above to be called from the userland
2912 	 * return path. But also attempt synchronous reclaim to avoid
2913 	 * excessive overrun while the task is still inside the
2914 	 * kernel. If this is successful, the return path will see it
2915 	 * when it rechecks the overage and simply bail out.
2916 	 */
2917 	if (current->memcg_nr_pages_over_high > MEMCG_CHARGE_BATCH &&
2918 	    !(current->flags & PF_MEMALLOC) &&
2919 	    gfpflags_allow_blocking(gfp_mask))
2920 		mem_cgroup_handle_over_high(gfp_mask);
2921 	return 0;
2922 }
2923 
2924 static inline int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
2925 			     unsigned int nr_pages)
2926 {
2927 	if (mem_cgroup_is_root(memcg))
2928 		return 0;
2929 
2930 	return try_charge_memcg(memcg, gfp_mask, nr_pages);
2931 }
2932 
2933 /**
2934  * mem_cgroup_cancel_charge() - cancel an uncommitted try_charge() call.
2935  * @memcg: memcg previously charged.
2936  * @nr_pages: number of pages previously charged.
2937  */
2938 void mem_cgroup_cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages)
2939 {
2940 	if (mem_cgroup_is_root(memcg))
2941 		return;
2942 
2943 	page_counter_uncharge(&memcg->memory, nr_pages);
2944 	if (do_memsw_account())
2945 		page_counter_uncharge(&memcg->memsw, nr_pages);
2946 }
2947 
2948 static void commit_charge(struct folio *folio, struct mem_cgroup *memcg)
2949 {
2950 	VM_BUG_ON_FOLIO(folio_memcg(folio), folio);
2951 	/*
2952 	 * Any of the following ensures page's memcg stability:
2953 	 *
2954 	 * - the page lock
2955 	 * - LRU isolation
2956 	 * - folio_memcg_lock()
2957 	 * - exclusive reference
2958 	 * - mem_cgroup_trylock_pages()
2959 	 */
2960 	folio->memcg_data = (unsigned long)memcg;
2961 }
2962 
2963 /**
2964  * mem_cgroup_commit_charge - commit a previously successful try_charge().
2965  * @folio: folio to commit the charge to.
2966  * @memcg: memcg previously charged.
2967  */
2968 void mem_cgroup_commit_charge(struct folio *folio, struct mem_cgroup *memcg)
2969 {
2970 	css_get(&memcg->css);
2971 	commit_charge(folio, memcg);
2972 
2973 	local_irq_disable();
2974 	mem_cgroup_charge_statistics(memcg, folio_nr_pages(folio));
2975 	memcg_check_events(memcg, folio_nid(folio));
2976 	local_irq_enable();
2977 }
2978 
2979 #ifdef CONFIG_MEMCG_KMEM
2980 /*
2981  * The allocated objcg pointers array is not accounted directly.
2982  * Moreover, it should not come from a DMA buffer and is not readily
2983  * reclaimable. So those GFP bits should be masked off.
2984  */
2985 #define OBJCGS_CLEAR_MASK	(__GFP_DMA | __GFP_RECLAIMABLE | \
2986 				 __GFP_ACCOUNT | __GFP_NOFAIL)
2987 
2988 /*
2989  * mod_objcg_mlstate() may be called with irq enabled, so
2990  * mod_memcg_lruvec_state() should be used.
2991  */
2992 static inline void mod_objcg_mlstate(struct obj_cgroup *objcg,
2993 				     struct pglist_data *pgdat,
2994 				     enum node_stat_item idx, int nr)
2995 {
2996 	struct mem_cgroup *memcg;
2997 	struct lruvec *lruvec;
2998 
2999 	rcu_read_lock();
3000 	memcg = obj_cgroup_memcg(objcg);
3001 	lruvec = mem_cgroup_lruvec(memcg, pgdat);
3002 	mod_memcg_lruvec_state(lruvec, idx, nr);
3003 	rcu_read_unlock();
3004 }
3005 
3006 int memcg_alloc_slab_cgroups(struct slab *slab, struct kmem_cache *s,
3007 				 gfp_t gfp, bool new_slab)
3008 {
3009 	unsigned int objects = objs_per_slab(s, slab);
3010 	unsigned long memcg_data;
3011 	void *vec;
3012 
3013 	gfp &= ~OBJCGS_CLEAR_MASK;
3014 	vec = kcalloc_node(objects, sizeof(struct obj_cgroup *), gfp,
3015 			   slab_nid(slab));
3016 	if (!vec)
3017 		return -ENOMEM;
3018 
3019 	memcg_data = (unsigned long) vec | MEMCG_DATA_OBJCGS;
3020 	if (new_slab) {
3021 		/*
3022 		 * If the slab is brand new and nobody can yet access its
3023 		 * memcg_data, no synchronization is required and memcg_data can
3024 		 * be simply assigned.
3025 		 */
3026 		slab->memcg_data = memcg_data;
3027 	} else if (cmpxchg(&slab->memcg_data, 0, memcg_data)) {
3028 		/*
3029 		 * If the slab is already in use, somebody can allocate and
3030 		 * assign obj_cgroups in parallel. In this case the existing
3031 		 * objcg vector should be reused.
3032 		 */
3033 		kfree(vec);
3034 		return 0;
3035 	}
3036 
3037 	kmemleak_not_leak(vec);
3038 	return 0;
3039 }
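/*
 * Size example for the vector allocated above (hypothetical slab layout):
 * a slab holding 32 objects needs 32 obj_cgroup pointers, i.e. 256 bytes
 * on a 64-bit kernel, allocated on the slab's node and excluded from kmem
 * accounting because OBJCGS_CLEAR_MASK strips __GFP_ACCOUNT.
 */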
3040 
3041 static __always_inline
3042 struct mem_cgroup *mem_cgroup_from_obj_folio(struct folio *folio, void *p)
3043 {
3044 	/*
3045 	 * Slab objects are accounted individually, not per-page.
3046 	 * Memcg membership data for each individual object is saved in
3047 	 * slab->memcg_data.
3048 	 */
3049 	if (folio_test_slab(folio)) {
3050 		struct obj_cgroup **objcgs;
3051 		struct slab *slab;
3052 		unsigned int off;
3053 
3054 		slab = folio_slab(folio);
3055 		objcgs = slab_objcgs(slab);
3056 		if (!objcgs)
3057 			return NULL;
3058 
3059 		off = obj_to_index(slab->slab_cache, slab, p);
3060 		if (objcgs[off])
3061 			return obj_cgroup_memcg(objcgs[off]);
3062 
3063 		return NULL;
3064 	}
3065 
3066 	/*
3067 	 * folio_memcg_check() is used here, because in theory we can encounter
3068 	 * a folio where the slab flag has been cleared already, but
3069 	 * slab->memcg_data has not been freed yet.
3070 	 * folio_memcg_check() will guarantee that a proper memory
3071 	 * cgroup pointer or NULL will be returned.
3072 	 */
3073 	return folio_memcg_check(folio);
3074 }
3075 
3076 /*
3077  * Returns a pointer to the memory cgroup to which the kernel object is charged.
3078  *
3079  * A passed kernel object can be a slab object, vmalloc object or a generic
3080  * kernel page, so different mechanisms for getting the memory cgroup pointer
3081  * should be used.
3082  *
3083  * In certain cases (e.g. kernel stacks or large kmallocs with SLUB) the caller
3084  * cannot know for sure how the kernel object is implemented.
3085  * mem_cgroup_from_obj() can be safely used in such cases.
3086  *
3087  * The caller must ensure the memcg lifetime, e.g. by taking rcu_read_lock(),
3088  * cgroup_mutex, etc.
3089  */
3090 struct mem_cgroup *mem_cgroup_from_obj(void *p)
3091 {
3092 	struct folio *folio;
3093 
3094 	if (mem_cgroup_disabled())
3095 		return NULL;
3096 
3097 	if (unlikely(is_vmalloc_addr(p)))
3098 		folio = page_folio(vmalloc_to_page(p));
3099 	else
3100 		folio = virt_to_folio(p);
3101 
3102 	return mem_cgroup_from_obj_folio(folio, p);
3103 }
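/*
 * Usage sketch for the lifetime rule documented above (illustrative only):
 *
 *	rcu_read_lock();
 *	memcg = mem_cgroup_from_obj(p);
 *	if (memcg)
 *		... inspect memcg, it stays valid under RCU ...
 *	rcu_read_unlock();
 */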
3104 
3105 /*
3106  * Returns a pointer to the memory cgroup to which the kernel object is charged.
3107  * Similar to mem_cgroup_from_obj(), but faster and not suitable for objects
3108  * allocated using vmalloc().
3109  *
3110  * A passed kernel object must be a slab object or a generic kernel page.
3111  *
3112  * The caller must ensure the memcg lifetime, e.g. by taking rcu_read_lock(),
3113  * cgroup_mutex, etc.
3114  */
3115 struct mem_cgroup *mem_cgroup_from_slab_obj(void *p)
3116 {
3117 	if (mem_cgroup_disabled())
3118 		return NULL;
3119 
3120 	return mem_cgroup_from_obj_folio(virt_to_folio(p), p);
3121 }
3122 
3123 static struct obj_cgroup *__get_obj_cgroup_from_memcg(struct mem_cgroup *memcg)
3124 {
3125 	struct obj_cgroup *objcg = NULL;
3126 
3127 	for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg)) {
3128 		objcg = rcu_dereference(memcg->objcg);
3129 		if (likely(objcg && obj_cgroup_tryget(objcg)))
3130 			break;
3131 		objcg = NULL;
3132 	}
3133 	return objcg;
3134 }
3135 
3136 static struct obj_cgroup *current_objcg_update(void)
3137 {
3138 	struct mem_cgroup *memcg;
3139 	struct obj_cgroup *old, *objcg = NULL;
3140 
3141 	do {
3142 		/* Atomically drop the update bit. */
3143 		old = xchg(&current->objcg, NULL);
3144 		if (old) {
3145 			old = (struct obj_cgroup *)
3146 				((unsigned long)old & ~CURRENT_OBJCG_UPDATE_FLAG);
3147 			if (old)
3148 				obj_cgroup_put(old);
3149 
3150 			old = NULL;
3151 		}
3152 
3153 		/* If new objcg is NULL, no reason for the second atomic update. */
3154 		if (!current->mm || (current->flags & PF_KTHREAD))
3155 			return NULL;
3156 
3157 		/*
3158 		 * Release the objcg pointer from the previous iteration,
3159 		 * if try_cmpxchg() below fails.
3160 		 */
3161 		if (unlikely(objcg)) {
3162 			obj_cgroup_put(objcg);
3163 			objcg = NULL;
3164 		}
3165 
3166 		/*
3167 		 * Obtain the new objcg pointer. The current task can be
3168 		 * asynchronously moved to another memcg and the previous
3169 		 * memcg can be offlined. So let's get the memcg pointer
3170 		 * and try get a reference to objcg under a rcu read lock.
3171 		 */
3172 
3173 		rcu_read_lock();
3174 		memcg = mem_cgroup_from_task(current);
3175 		objcg = __get_obj_cgroup_from_memcg(memcg);
3176 		rcu_read_unlock();
3177 
3178 		/*
3179 		 * Try to set up a new objcg pointer atomically. If it
3180 		 * fails, it means the update flag was set concurrently, so
3181 		 * the whole procedure should be repeated.
3182 		 */
3183 	} while (!try_cmpxchg(&current->objcg, &old, objcg));
3184 
3185 	return objcg;
3186 }
3187 
3188 __always_inline struct obj_cgroup *current_obj_cgroup(void)
3189 {
3190 	struct mem_cgroup *memcg;
3191 	struct obj_cgroup *objcg;
3192 
3193 	if (in_task()) {
3194 		memcg = current->active_memcg;
3195 		if (unlikely(memcg))
3196 			goto from_memcg;
3197 
3198 		objcg = READ_ONCE(current->objcg);
3199 		if (unlikely((unsigned long)objcg & CURRENT_OBJCG_UPDATE_FLAG))
3200 			objcg = current_objcg_update();
3201 		/*
3202 		 * The objcg reference is kept by the task, so it's safe
3203 		 * for the current task to use it.
3204 		 */
3205 		return objcg;
3206 	}
3207 
3208 	memcg = this_cpu_read(int_active_memcg);
3209 	if (unlikely(memcg))
3210 		goto from_memcg;
3211 
3212 	return NULL;
3213 
3214 from_memcg:
3215 	objcg = NULL;
3216 	for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg)) {
3217 		/*
3218 		 * Memcg pointer is protected by scope (see set_active_memcg())
3219 		 * and is pinning the corresponding objcg, so objcg can't go
3220 		 * away and can be used within the scope without any additional
3221 		 * protection.
3222 		 */
3223 		objcg = rcu_dereference_check(memcg->objcg, 1);
3224 		if (likely(objcg))
3225 			break;
3226 	}
3227 
3228 	return objcg;
3229 }
3230 
3231 struct obj_cgroup *get_obj_cgroup_from_folio(struct folio *folio)
3232 {
3233 	struct obj_cgroup *objcg;
3234 
3235 	if (!memcg_kmem_online())
3236 		return NULL;
3237 
3238 	if (folio_memcg_kmem(folio)) {
3239 		objcg = __folio_objcg(folio);
3240 		obj_cgroup_get(objcg);
3241 	} else {
3242 		struct mem_cgroup *memcg;
3243 
3244 		rcu_read_lock();
3245 		memcg = __folio_memcg(folio);
3246 		if (memcg)
3247 			objcg = __get_obj_cgroup_from_memcg(memcg);
3248 		else
3249 			objcg = NULL;
3250 		rcu_read_unlock();
3251 	}
3252 	return objcg;
3253 }
3254 
3255 static void memcg_account_kmem(struct mem_cgroup *memcg, int nr_pages)
3256 {
3257 	mod_memcg_state(memcg, MEMCG_KMEM, nr_pages);
3258 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
3259 		if (nr_pages > 0)
3260 			page_counter_charge(&memcg->kmem, nr_pages);
3261 		else
3262 			page_counter_uncharge(&memcg->kmem, -nr_pages);
3263 	}
3264 }
3265 
3266 
3267 /*
3268  * obj_cgroup_uncharge_pages: uncharge a number of kernel pages from an objcg
3269  * @objcg: object cgroup to uncharge
3270  * @nr_pages: number of pages to uncharge
3271  */
3272 static void obj_cgroup_uncharge_pages(struct obj_cgroup *objcg,
3273 				      unsigned int nr_pages)
3274 {
3275 	struct mem_cgroup *memcg;
3276 
3277 	memcg = get_mem_cgroup_from_objcg(objcg);
3278 
3279 	memcg_account_kmem(memcg, -nr_pages);
3280 	refill_stock(memcg, nr_pages);
3281 
3282 	css_put(&memcg->css);
3283 }
3284 
3285 /*
3286  * obj_cgroup_charge_pages: charge a number of kernel pages to an objcg
3287  * @objcg: object cgroup to charge
3288  * @gfp: reclaim mode
3289  * @nr_pages: number of pages to charge
3290  *
3291  * Returns 0 on success, an error code on failure.
3292  */
3293 static int obj_cgroup_charge_pages(struct obj_cgroup *objcg, gfp_t gfp,
3294 				   unsigned int nr_pages)
3295 {
3296 	struct mem_cgroup *memcg;
3297 	int ret;
3298 
3299 	memcg = get_mem_cgroup_from_objcg(objcg);
3300 
3301 	ret = try_charge_memcg(memcg, gfp, nr_pages);
3302 	if (ret)
3303 		goto out;
3304 
3305 	memcg_account_kmem(memcg, nr_pages);
3306 out:
3307 	css_put(&memcg->css);
3308 
3309 	return ret;
3310 }
3311 
3312 /**
3313  * __memcg_kmem_charge_page: charge a kmem page to the current memory cgroup
3314  * @page: page to charge
3315  * @gfp: reclaim mode
3316  * @order: allocation order
3317  *
3318  * Returns 0 on success, an error code on failure.
3319  */
3320 int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order)
3321 {
3322 	struct obj_cgroup *objcg;
3323 	int ret = 0;
3324 
3325 	objcg = current_obj_cgroup();
3326 	if (objcg) {
3327 		ret = obj_cgroup_charge_pages(objcg, gfp, 1 << order);
3328 		if (!ret) {
3329 			obj_cgroup_get(objcg);
3330 			page->memcg_data = (unsigned long)objcg |
3331 				MEMCG_DATA_KMEM;
3332 			return 0;
3333 		}
3334 	}
3335 	return ret;
3336 }
3337 
3338 /**
3339  * __memcg_kmem_uncharge_page: uncharge a kmem page
3340  * @page: page to uncharge
3341  * @order: allocation order
3342  */
3343 void __memcg_kmem_uncharge_page(struct page *page, int order)
3344 {
3345 	struct folio *folio = page_folio(page);
3346 	struct obj_cgroup *objcg;
3347 	unsigned int nr_pages = 1 << order;
3348 
3349 	if (!folio_memcg_kmem(folio))
3350 		return;
3351 
3352 	objcg = __folio_objcg(folio);
3353 	obj_cgroup_uncharge_pages(objcg, nr_pages);
3354 	folio->memcg_data = 0;
3355 	obj_cgroup_put(objcg);
3356 }
3357 
3358 void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat,
3359 		     enum node_stat_item idx, int nr)
3360 {
3361 	struct memcg_stock_pcp *stock;
3362 	struct obj_cgroup *old = NULL;
3363 	unsigned long flags;
3364 	int *bytes;
3365 
3366 	local_lock_irqsave(&memcg_stock.stock_lock, flags);
3367 	stock = this_cpu_ptr(&memcg_stock);
3368 
3369 	/*
3370 	 * Save vmstat data in stock and skip vmstat array update unless
3371 	 * accumulating over a page of vmstat data or when pgdat or idx
3372 	 * changes.
3373 	 */
3374 	if (READ_ONCE(stock->cached_objcg) != objcg) {
3375 		old = drain_obj_stock(stock);
3376 		obj_cgroup_get(objcg);
3377 		stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes)
3378 				? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0;
3379 		WRITE_ONCE(stock->cached_objcg, objcg);
3380 		stock->cached_pgdat = pgdat;
3381 	} else if (stock->cached_pgdat != pgdat) {
3382 		/* Flush the existing cached vmstat data */
3383 		struct pglist_data *oldpg = stock->cached_pgdat;
3384 
3385 		if (stock->nr_slab_reclaimable_b) {
3386 			mod_objcg_mlstate(objcg, oldpg, NR_SLAB_RECLAIMABLE_B,
3387 					  stock->nr_slab_reclaimable_b);
3388 			stock->nr_slab_reclaimable_b = 0;
3389 		}
3390 		if (stock->nr_slab_unreclaimable_b) {
3391 			mod_objcg_mlstate(objcg, oldpg, NR_SLAB_UNRECLAIMABLE_B,
3392 					  stock->nr_slab_unreclaimable_b);
3393 			stock->nr_slab_unreclaimable_b = 0;
3394 		}
3395 		stock->cached_pgdat = pgdat;
3396 	}
3397 
3398 	bytes = (idx == NR_SLAB_RECLAIMABLE_B) ? &stock->nr_slab_reclaimable_b
3399 					       : &stock->nr_slab_unreclaimable_b;
3400 	/*
3401 	 * Even for large objects >= PAGE_SIZE, the vmstat data will still be
3402 	 * cached locally at least once before pushing it out.
3403 	 */
3404 	if (!*bytes) {
3405 		*bytes = nr;
3406 		nr = 0;
3407 	} else {
3408 		*bytes += nr;
3409 		if (abs(*bytes) > PAGE_SIZE) {
3410 			nr = *bytes;
3411 			*bytes = 0;
3412 		} else {
3413 			nr = 0;
3414 		}
3415 	}
3416 	if (nr)
3417 		mod_objcg_mlstate(objcg, pgdat, idx, nr);
3418 
3419 	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
3420 	if (old)
3421 		obj_cgroup_put(old);
3422 }
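/*
 * Worked example of the vmstat batching above (hypothetical numbers): a
 * series of +256 byte NR_SLAB_RECLAIMABLE_B updates from the same pgdat
 * only accumulates in stock->nr_slab_reclaimable_b; the lruvec counters
 * are touched once the cached value exceeds PAGE_SIZE, or when the cached
 * objcg or pgdat changes and the stock is flushed.
 */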
3423 
3424 static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes)
3425 {
3426 	struct memcg_stock_pcp *stock;
3427 	unsigned long flags;
3428 	bool ret = false;
3429 
3430 	local_lock_irqsave(&memcg_stock.stock_lock, flags);
3431 
3432 	stock = this_cpu_ptr(&memcg_stock);
3433 	if (objcg == READ_ONCE(stock->cached_objcg) && stock->nr_bytes >= nr_bytes) {
3434 		stock->nr_bytes -= nr_bytes;
3435 		ret = true;
3436 	}
3437 
3438 	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
3439 
3440 	return ret;
3441 }
3442 
3443 static struct obj_cgroup *drain_obj_stock(struct memcg_stock_pcp *stock)
3444 {
3445 	struct obj_cgroup *old = READ_ONCE(stock->cached_objcg);
3446 
3447 	if (!old)
3448 		return NULL;
3449 
3450 	if (stock->nr_bytes) {
3451 		unsigned int nr_pages = stock->nr_bytes >> PAGE_SHIFT;
3452 		unsigned int nr_bytes = stock->nr_bytes & (PAGE_SIZE - 1);
3453 
3454 		if (nr_pages) {
3455 			struct mem_cgroup *memcg;
3456 
3457 			memcg = get_mem_cgroup_from_objcg(old);
3458 
3459 			memcg_account_kmem(memcg, -nr_pages);
3460 			__refill_stock(memcg, nr_pages);
3461 
3462 			css_put(&memcg->css);
3463 		}
3464 
3465 		/*
3466 		 * The leftover is flushed to the centralized per-memcg value.
3467 		 * On the next attempt to refill obj stock it will be moved
3468 		 * to a per-cpu stock (probably on another CPU), see
3469 		 * refill_obj_stock().
3470 		 *
3471 		 * How often it's flushed is a trade-off between the memory
3472 		 * limit enforcement accuracy and potential CPU contention,
3473 		 * so it might be changed in the future.
3474 		 */
3475 		atomic_add(nr_bytes, &old->nr_charged_bytes);
3476 		stock->nr_bytes = 0;
3477 	}
3478 
3479 	/*
3480 	 * Flush the vmstat data in current stock
3481 	 */
3482 	if (stock->nr_slab_reclaimable_b || stock->nr_slab_unreclaimable_b) {
3483 		if (stock->nr_slab_reclaimable_b) {
3484 			mod_objcg_mlstate(old, stock->cached_pgdat,
3485 					  NR_SLAB_RECLAIMABLE_B,
3486 					  stock->nr_slab_reclaimable_b);
3487 			stock->nr_slab_reclaimable_b = 0;
3488 		}
3489 		if (stock->nr_slab_unreclaimable_b) {
3490 			mod_objcg_mlstate(old, stock->cached_pgdat,
3491 					  NR_SLAB_UNRECLAIMABLE_B,
3492 					  stock->nr_slab_unreclaimable_b);
3493 			stock->nr_slab_unreclaimable_b = 0;
3494 		}
3495 		stock->cached_pgdat = NULL;
3496 	}
3497 
3498 	WRITE_ONCE(stock->cached_objcg, NULL);
3499 	/*
3500 	 * The `old' objcg needs to be released by the caller via
3501 	 * obj_cgroup_put() outside of memcg_stock_pcp::stock_lock.
3502 	 */
3503 	return old;
3504 }
3505 
3506 static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
3507 				     struct mem_cgroup *root_memcg)
3508 {
3509 	struct obj_cgroup *objcg = READ_ONCE(stock->cached_objcg);
3510 	struct mem_cgroup *memcg;
3511 
3512 	if (objcg) {
3513 		memcg = obj_cgroup_memcg(objcg);
3514 		if (memcg && mem_cgroup_is_descendant(memcg, root_memcg))
3515 			return true;
3516 	}
3517 
3518 	return false;
3519 }
3520 
3521 static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes,
3522 			     bool allow_uncharge)
3523 {
3524 	struct memcg_stock_pcp *stock;
3525 	struct obj_cgroup *old = NULL;
3526 	unsigned long flags;
3527 	unsigned int nr_pages = 0;
3528 
3529 	local_lock_irqsave(&memcg_stock.stock_lock, flags);
3530 
3531 	stock = this_cpu_ptr(&memcg_stock);
3532 	if (READ_ONCE(stock->cached_objcg) != objcg) { /* reset if necessary */
3533 		old = drain_obj_stock(stock);
3534 		obj_cgroup_get(objcg);
3535 		WRITE_ONCE(stock->cached_objcg, objcg);
3536 		stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes)
3537 				? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0;
3538 		allow_uncharge = true;	/* Allow uncharge when objcg changes */
3539 	}
3540 	stock->nr_bytes += nr_bytes;
3541 
3542 	if (allow_uncharge && (stock->nr_bytes > PAGE_SIZE)) {
3543 		nr_pages = stock->nr_bytes >> PAGE_SHIFT;
3544 		stock->nr_bytes &= (PAGE_SIZE - 1);
3545 	}
3546 
3547 	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
3548 	if (old)
3549 		obj_cgroup_put(old);
3550 
3551 	if (nr_pages)
3552 		obj_cgroup_uncharge_pages(objcg, nr_pages);
3553 }
3554 
3555 int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size)
3556 {
3557 	unsigned int nr_pages, nr_bytes;
3558 	int ret;
3559 
3560 	if (consume_obj_stock(objcg, size))
3561 		return 0;
3562 
3563 	/*
3564 	 * In theory, objcg->nr_charged_bytes can have enough
3565 	 * pre-charged bytes to satisfy the allocation. However,
3566 	 * flushing objcg->nr_charged_bytes requires two atomic
3567 	 * operations, and objcg->nr_charged_bytes can't be big.
3568 	 * The shared objcg->nr_charged_bytes can also become a
3569 	 * performance bottleneck if all tasks of the same memcg are
3570 	 * trying to update it. So it's better to ignore it and try
3571 	 * grab some new pages. The stock's nr_bytes will be flushed to
3572 	 * objcg->nr_charged_bytes later on when objcg changes.
3573 	 *
3574 	 * The stock's nr_bytes may contain enough pre-charged bytes
3575 	 * to allow one less page from being charged, but we can't rely
3576 	 * on the pre-charged bytes not being changed outside of
3577 	 * consume_obj_stock() or refill_obj_stock(). So ignore those
3578 	 * pre-charged bytes as well when charging pages. To avoid a
3579 	 * page uncharge right after a page charge, we set the
3580 	 * allow_uncharge flag to false when calling refill_obj_stock()
3581 	 * to temporarily allow the pre-charged bytes to exceed the page
3582 	 * size limit. The maximum reachable value of the pre-charged
3583 	 * bytes is (sizeof(object) + PAGE_SIZE - 2) if there is no data
3584 	 * race.
3585 	 */
3586 	nr_pages = size >> PAGE_SHIFT;
3587 	nr_bytes = size & (PAGE_SIZE - 1);
3588 
3589 	if (nr_bytes)
3590 		nr_pages += 1;
3591 
3592 	ret = obj_cgroup_charge_pages(objcg, gfp, nr_pages);
3593 	if (!ret && nr_bytes)
3594 		refill_obj_stock(objcg, PAGE_SIZE - nr_bytes, false);
3595 
3596 	return ret;
3597 }
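/*
 * Worked example for the byte/page split in obj_cgroup_charge() above
 * (the size is hypothetical): with 4K pages and size = PAGE_SIZE + 100,
 * nr_pages = 1 and nr_bytes = 100, so nr_pages is bumped to 2.  If the
 * two-page charge succeeds, refill_obj_stock() stashes the remaining
 * PAGE_SIZE - 100 bytes in the per-CPU stock, where later sub-page
 * allocations from the same objcg can consume them without touching the
 * page counters.
 */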
3598 
3599 void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size)
3600 {
3601 	refill_obj_stock(objcg, size, true);
3602 }
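/*
 * Minimal caller sketch (illustrative, not an actual call site): assuming
 * the caller already holds an objcg reference obtained elsewhere, a
 * byte-sized kernel allocation is accounted and unaccounted like this:
 *
 *	if (!obj_cgroup_charge(objcg, GFP_KERNEL, size)) {
 *		... allocate and use the object ...
 *		obj_cgroup_uncharge(objcg, size);
 *	}
 *
 * The slab allocator pairs the two calls in the same way when accounting
 * individual objects to an obj_cgroup.
 */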
3603 
3604 #endif /* CONFIG_MEMCG_KMEM */
3605 
3606 /*
3607  * Because page_memcg(head) is not set on tails, set it now.
3608  */
3609 void split_page_memcg(struct page *head, unsigned int nr)
3610 {
3611 	struct folio *folio = page_folio(head);
3612 	struct mem_cgroup *memcg = folio_memcg(folio);
3613 	int i;
3614 
3615 	if (mem_cgroup_disabled() || !memcg)
3616 		return;
3617 
3618 	for (i = 1; i < nr; i++)
3619 		folio_page(folio, i)->memcg_data = folio->memcg_data;
3620 
3621 	if (folio_memcg_kmem(folio))
3622 		obj_cgroup_get_many(__folio_objcg(folio), nr - 1);
3623 	else
3624 		css_get_many(&memcg->css, nr - 1);
3625 }
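/*
 * Example (illustrative): splitting an order-2 folio ends up here as
 * split_page_memcg(head, 4).  The loop above copies the head's memcg_data
 * to the three tail pages, and three extra objcg or css references are
 * taken so that each page can later be uncharged independently.
 */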
3626 
3627 #ifdef CONFIG_SWAP
3628 /**
3629  * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record.
3630  * @entry: swap entry to be moved
3631  * @from:  mem_cgroup which the entry is moved from
3632  * @to:  mem_cgroup which the entry is moved to
3633  *
3634  * It succeeds only when the swap_cgroup's record for this entry is the same
3635  * as the mem_cgroup's id of @from.
3636  *
3637  * Returns 0 on success, -EINVAL on failure.
3638  *
3639 	 * The caller must have charged to @to, IOW, called page_counter_charge() on
3640  * both res and memsw, and called css_get().
3641  */
3642 static int mem_cgroup_move_swap_account(swp_entry_t entry,
3643 				struct mem_cgroup *from, struct mem_cgroup *to)
3644 {
3645 	unsigned short old_id, new_id;
3646 
3647 	old_id = mem_cgroup_id(from);
3648 	new_id = mem_cgroup_id(to);
3649 
3650 	if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
3651 		mod_memcg_state(from, MEMCG_SWAP, -1);
3652 		mod_memcg_state(to, MEMCG_SWAP, 1);
3653 		return 0;
3654 	}
3655 	return -EINVAL;
3656 }
3657 #else
3658 static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
3659 				struct mem_cgroup *from, struct mem_cgroup *to)
3660 {
3661 	return -EINVAL;
3662 }
3663 #endif
3664 
3665 static DEFINE_MUTEX(memcg_max_mutex);
3666 
3667 static int mem_cgroup_resize_max(struct mem_cgroup *memcg,
3668 				 unsigned long max, bool memsw)
3669 {
3670 	bool enlarge = false;
3671 	bool drained = false;
3672 	int ret;
3673 	bool limits_invariant;
3674 	struct page_counter *counter = memsw ? &memcg->memsw : &memcg->memory;
3675 
3676 	do {
3677 		if (signal_pending(current)) {
3678 			ret = -EINTR;
3679 			break;
3680 		}
3681 
3682 		mutex_lock(&memcg_max_mutex);
3683 		/*
3684 		 * Make sure that the new limit (memsw or memory limit) doesn't
3685 		 * break our basic invariant rule memory.max <= memsw.max.
3686 		 */
3687 		limits_invariant = memsw ? max >= READ_ONCE(memcg->memory.max) :
3688 					   max <= memcg->memsw.max;
3689 		if (!limits_invariant) {
3690 			mutex_unlock(&memcg_max_mutex);
3691 			ret = -EINVAL;
3692 			break;
3693 		}
3694 		if (max > counter->max)
3695 			enlarge = true;
3696 		ret = page_counter_set_max(counter, max);
3697 		mutex_unlock(&memcg_max_mutex);
3698 
3699 		if (!ret)
3700 			break;
3701 
3702 		if (!drained) {
3703 			drain_all_stock(memcg);
3704 			drained = true;
3705 			continue;
3706 		}
3707 
3708 		if (!try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL,
3709 					memsw ? 0 : MEMCG_RECLAIM_MAY_SWAP)) {
3710 			ret = -EBUSY;
3711 			break;
3712 		}
3713 	} while (true);
3714 
3715 	if (!ret && enlarge)
3716 		memcg_oom_recover(memcg);
3717 
3718 	return ret;
3719 }
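/*
 * Example of the invariant check above (the numbers are illustrative):
 * with memory.max already at 512M, an attempt to set memsw.max to 256M
 * fails the "max >= memory.max" test, so limits_invariant is false and
 * the write returns -EINVAL.  Likewise, raising memory.max above the
 * current memsw.max is rejected the same way.
 */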
3720 
3721 unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
3722 					    gfp_t gfp_mask,
3723 					    unsigned long *total_scanned)
3724 {
3725 	unsigned long nr_reclaimed = 0;
3726 	struct mem_cgroup_per_node *mz, *next_mz = NULL;
3727 	unsigned long reclaimed;
3728 	int loop = 0;
3729 	struct mem_cgroup_tree_per_node *mctz;
3730 	unsigned long excess;
3731 
3732 	if (lru_gen_enabled())
3733 		return 0;
3734 
3735 	if (order > 0)
3736 		return 0;
3737 
3738 	mctz = soft_limit_tree.rb_tree_per_node[pgdat->node_id];
3739 
3740 	/*
3741 	 * Do not even bother to check the largest node if the root
3742 	 * is empty. Do it lockless to prevent lock bouncing. Races
3743 	 * are acceptable as soft limit is best effort anyway.
3744 	 */
3745 	if (!mctz || RB_EMPTY_ROOT(&mctz->rb_root))
3746 		return 0;
3747 
3748 	/*
3749 	 * This loop can run for a while, especially if mem_cgroups continuously
3750 	 * keep exceeding their soft limit and putting the system under
3751 	 * pressure.
3752 	 */
3753 	do {
3754 		if (next_mz)
3755 			mz = next_mz;
3756 		else
3757 			mz = mem_cgroup_largest_soft_limit_node(mctz);
3758 		if (!mz)
3759 			break;
3760 
3761 		reclaimed = mem_cgroup_soft_reclaim(mz->memcg, pgdat,
3762 						    gfp_mask, total_scanned);
3763 		nr_reclaimed += reclaimed;
3764 		spin_lock_irq(&mctz->lock);
3765 
3766 		/*
3767 		 * If we failed to reclaim anything from this memory cgroup
3768 		 * it is time to move on to the next cgroup
3769 		 */
3770 		next_mz = NULL;
3771 		if (!reclaimed)
3772 			next_mz = __mem_cgroup_largest_soft_limit_node(mctz);
3773 
3774 		excess = soft_limit_excess(mz->memcg);
3775 		/*
3776 		 * One school of thought says that we should not add
3777 		 * back the node to the tree if reclaim returns 0.
3778 		 * But our reclaim could return 0 simply because, due
3779 		 * to priority, we are exposing a smaller subset of
3780 		 * memory to reclaim from. Consider this as a longer
3781 		 * term TODO.
3782 		 */
3783 		/* If excess == 0, no tree ops */
3784 		__mem_cgroup_insert_exceeded(mz, mctz, excess);
3785 		spin_unlock_irq(&mctz->lock);
3786 		css_put(&mz->memcg->css);
3787 		loop++;
3788 		/*
3789 		 * Could not reclaim anything and there are no more
3790 		 * mem cgroups to try or we seem to be looping without
3791 		 * reclaiming anything.
3792 		 */
3793 		if (!nr_reclaimed &&
3794 			(next_mz == NULL ||
3795 			loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS))
3796 			break;
3797 	} while (!nr_reclaimed);
3798 	if (next_mz)
3799 		css_put(&next_mz->memcg->css);
3800 	return nr_reclaimed;
3801 }
3802 
3803 /*
3804  * Reclaims as many pages from the given memcg as possible.
3805  *
3806  * Caller is responsible for holding css reference for memcg.
3807  */
3808 static int mem_cgroup_force_empty(struct mem_cgroup *memcg)
3809 {
3810 	int nr_retries = MAX_RECLAIM_RETRIES;
3811 
3812 	/* we call try-to-free pages to make this cgroup empty */
3813 	lru_add_drain_all();
3814 
3815 	drain_all_stock(memcg);
3816 
3817 	/* try to free all pages in this cgroup */
3818 	while (nr_retries && page_counter_read(&memcg->memory)) {
3819 		if (signal_pending(current))
3820 			return -EINTR;
3821 
3822 		if (!try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL,
3823 						  MEMCG_RECLAIM_MAY_SWAP))
3824 			nr_retries--;
3825 	}
3826 
3827 	return 0;
3828 }
3829 
3830 static ssize_t mem_cgroup_force_empty_write(struct kernfs_open_file *of,
3831 					    char *buf, size_t nbytes,
3832 					    loff_t off)
3833 {
3834 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
3835 
3836 	if (mem_cgroup_is_root(memcg))
3837 		return -EINVAL;
3838 	return mem_cgroup_force_empty(memcg) ?: nbytes;
3839 }
3840 
3841 static u64 mem_cgroup_hierarchy_read(struct cgroup_subsys_state *css,
3842 				     struct cftype *cft)
3843 {
3844 	return 1;
3845 }
3846 
3847 static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css,
3848 				      struct cftype *cft, u64 val)
3849 {
3850 	if (val == 1)
3851 		return 0;
3852 
3853 	pr_warn_once("Non-hierarchical mode is deprecated. "
3854 		     "Please report your usecase to linux-mm@kvack.org if you "
3855 		     "depend on this functionality.\n");
3856 
3857 	return -EINVAL;
3858 }
3859 
3860 static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
3861 {
3862 	unsigned long val;
3863 
3864 	if (mem_cgroup_is_root(memcg)) {
3865 		/*
3866 		 * Approximate root's usage from global state. This isn't
3867 		 * perfect, but the root usage was always an approximation.
3868 		 */
3869 		val = global_node_page_state(NR_FILE_PAGES) +
3870 			global_node_page_state(NR_ANON_MAPPED);
3871 		if (swap)
3872 			val += total_swap_pages - get_nr_swap_pages();
3873 	} else {
3874 		if (!swap)
3875 			val = page_counter_read(&memcg->memory);
3876 		else
3877 			val = page_counter_read(&memcg->memsw);
3878 	}
3879 	return val;
3880 }
3881 
3882 enum {
3883 	RES_USAGE,
3884 	RES_LIMIT,
3885 	RES_MAX_USAGE,
3886 	RES_FAILCNT,
3887 	RES_SOFT_LIMIT,
3888 };
3889 
3890 static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css,
3891 			       struct cftype *cft)
3892 {
3893 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3894 	struct page_counter *counter;
3895 
3896 	switch (MEMFILE_TYPE(cft->private)) {
3897 	case _MEM:
3898 		counter = &memcg->memory;
3899 		break;
3900 	case _MEMSWAP:
3901 		counter = &memcg->memsw;
3902 		break;
3903 	case _KMEM:
3904 		counter = &memcg->kmem;
3905 		break;
3906 	case _TCP:
3907 		counter = &memcg->tcpmem;
3908 		break;
3909 	default:
3910 		BUG();
3911 	}
3912 
3913 	switch (MEMFILE_ATTR(cft->private)) {
3914 	case RES_USAGE:
3915 		if (counter == &memcg->memory)
3916 			return (u64)mem_cgroup_usage(memcg, false) * PAGE_SIZE;
3917 		if (counter == &memcg->memsw)
3918 			return (u64)mem_cgroup_usage(memcg, true) * PAGE_SIZE;
3919 		return (u64)page_counter_read(counter) * PAGE_SIZE;
3920 	case RES_LIMIT:
3921 		return (u64)counter->max * PAGE_SIZE;
3922 	case RES_MAX_USAGE:
3923 		return (u64)counter->watermark * PAGE_SIZE;
3924 	case RES_FAILCNT:
3925 		return counter->failcnt;
3926 	case RES_SOFT_LIMIT:
3927 		return (u64)READ_ONCE(memcg->soft_limit) * PAGE_SIZE;
3928 	default:
3929 		BUG();
3930 	}
3931 }
3932 
3933 /*
3934  * This function doesn't do anything useful. Its only job is to provide a read
3935  * handler for a file so that cgroup_file_mode() will add read permissions.
3936  */
3937 static int mem_cgroup_dummy_seq_show(__always_unused struct seq_file *m,
3938 				     __always_unused void *v)
3939 {
3940 	return -EINVAL;
3941 }
3942 
3943 #ifdef CONFIG_MEMCG_KMEM
3944 static int memcg_online_kmem(struct mem_cgroup *memcg)
3945 {
3946 	struct obj_cgroup *objcg;
3947 
3948 	if (mem_cgroup_kmem_disabled())
3949 		return 0;
3950 
3951 	if (unlikely(mem_cgroup_is_root(memcg)))
3952 		return 0;
3953 
3954 	objcg = obj_cgroup_alloc();
3955 	if (!objcg)
3956 		return -ENOMEM;
3957 
3958 	objcg->memcg = memcg;
3959 	rcu_assign_pointer(memcg->objcg, objcg);
3960 	obj_cgroup_get(objcg);
3961 	memcg->orig_objcg = objcg;
3962 
3963 	static_branch_enable(&memcg_kmem_online_key);
3964 
3965 	memcg->kmemcg_id = memcg->id.id;
3966 
3967 	return 0;
3968 }
3969 
3970 static void memcg_offline_kmem(struct mem_cgroup *memcg)
3971 {
3972 	struct mem_cgroup *parent;
3973 
3974 	if (mem_cgroup_kmem_disabled())
3975 		return;
3976 
3977 	if (unlikely(mem_cgroup_is_root(memcg)))
3978 		return;
3979 
3980 	parent = parent_mem_cgroup(memcg);
3981 	if (!parent)
3982 		parent = root_mem_cgroup;
3983 
3984 	memcg_reparent_objcgs(memcg, parent);
3985 
3986 	/*
3987 	 * After we have finished memcg_reparent_objcgs(), all list_lrus
3988 	 * corresponding to this cgroup are guaranteed to remain empty.
3989 	 * The ordering is imposed by list_lru_node->lock taken by
3990 	 * memcg_reparent_list_lrus().
3991 	 */
3992 	memcg_reparent_list_lrus(memcg, parent);
3993 }
3994 #else
3995 static int memcg_online_kmem(struct mem_cgroup *memcg)
3996 {
3997 	return 0;
3998 }
3999 static void memcg_offline_kmem(struct mem_cgroup *memcg)
4000 {
4001 }
4002 #endif /* CONFIG_MEMCG_KMEM */
4003 
4004 static int memcg_update_tcp_max(struct mem_cgroup *memcg, unsigned long max)
4005 {
4006 	int ret;
4007 
4008 	mutex_lock(&memcg_max_mutex);
4009 
4010 	ret = page_counter_set_max(&memcg->tcpmem, max);
4011 	if (ret)
4012 		goto out;
4013 
4014 	if (!memcg->tcpmem_active) {
4015 		/*
4016 		 * The active flag needs to be written after the static_key
4017 		 * update. This is what guarantees that the socket activation
4018 		 * function is the last one to run. See mem_cgroup_sk_alloc()
4019 		 * for details, and note that we don't mark any socket as
4020 		 * belonging to this memcg until that flag is up.
4021 		 *
4022 		 * We need to do this, because static_keys will span multiple
4023 		 * sites, but we can't control their order. If we mark a socket
4024 		 * as accounted, but the accounting functions are not patched in
4025 		 * yet, we'll lose accounting.
4026 		 *
4027 		 * We never race with the readers in mem_cgroup_sk_alloc(),
4028 		 * because when this value changes, the code to process it is not
4029 		 * patched in yet.
4030 		 */
4031 		static_branch_inc(&memcg_sockets_enabled_key);
4032 		memcg->tcpmem_active = true;
4033 	}
4034 out:
4035 	mutex_unlock(&memcg_max_mutex);
4036 	return ret;
4037 }
4038 
4039 /*
4040  * The user of this function is...
4041  * RES_LIMIT.
4042  */
4043 static ssize_t mem_cgroup_write(struct kernfs_open_file *of,
4044 				char *buf, size_t nbytes, loff_t off)
4045 {
4046 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4047 	unsigned long nr_pages;
4048 	int ret;
4049 
4050 	buf = strstrip(buf);
4051 	ret = page_counter_memparse(buf, "-1", &nr_pages);
4052 	if (ret)
4053 		return ret;
4054 
4055 	switch (MEMFILE_ATTR(of_cft(of)->private)) {
4056 	case RES_LIMIT:
4057 		if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */
4058 			ret = -EINVAL;
4059 			break;
4060 		}
4061 		switch (MEMFILE_TYPE(of_cft(of)->private)) {
4062 		case _MEM:
4063 			ret = mem_cgroup_resize_max(memcg, nr_pages, false);
4064 			break;
4065 		case _MEMSWAP:
4066 			ret = mem_cgroup_resize_max(memcg, nr_pages, true);
4067 			break;
4068 		case _KMEM:
4069 			pr_warn_once("kmem.limit_in_bytes is deprecated and will be removed. "
4070 				     "Writing any value to this file has no effect. "
4071 				     "Please report your usecase to linux-mm@kvack.org if you "
4072 				     "depend on this functionality.\n");
4073 			ret = 0;
4074 			break;
4075 		case _TCP:
4076 			ret = memcg_update_tcp_max(memcg, nr_pages);
4077 			break;
4078 		}
4079 		break;
4080 	case RES_SOFT_LIMIT:
4081 		if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
4082 			ret = -EOPNOTSUPP;
4083 		} else {
4084 			WRITE_ONCE(memcg->soft_limit, nr_pages);
4085 			ret = 0;
4086 		}
4087 		break;
4088 	}
4089 	return ret ?: nbytes;
4090 }
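/*
 * Userspace view of the handler above (cgroup v1; the value is
 * illustrative):
 *
 *	echo 512M > memory.limit_in_bytes
 *
 * page_counter_memparse() converts "512M" into nr_pages, and
 * mem_cgroup_resize_max() then reclaims until the new limit can be
 * installed or gives up with -EBUSY.  Writing "-1" resets the limit to
 * unlimited.
 */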
4091 
4092 static ssize_t mem_cgroup_reset(struct kernfs_open_file *of, char *buf,
4093 				size_t nbytes, loff_t off)
4094 {
4095 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4096 	struct page_counter *counter;
4097 
4098 	switch (MEMFILE_TYPE(of_cft(of)->private)) {
4099 	case _MEM:
4100 		counter = &memcg->memory;
4101 		break;
4102 	case _MEMSWAP:
4103 		counter = &memcg->memsw;
4104 		break;
4105 	case _KMEM:
4106 		counter = &memcg->kmem;
4107 		break;
4108 	case _TCP:
4109 		counter = &memcg->tcpmem;
4110 		break;
4111 	default:
4112 		BUG();
4113 	}
4114 
4115 	switch (MEMFILE_ATTR(of_cft(of)->private)) {
4116 	case RES_MAX_USAGE:
4117 		page_counter_reset_watermark(counter);
4118 		break;
4119 	case RES_FAILCNT:
4120 		counter->failcnt = 0;
4121 		break;
4122 	default:
4123 		BUG();
4124 	}
4125 
4126 	return nbytes;
4127 }
4128 
4129 static u64 mem_cgroup_move_charge_read(struct cgroup_subsys_state *css,
4130 					struct cftype *cft)
4131 {
4132 	return mem_cgroup_from_css(css)->move_charge_at_immigrate;
4133 }
4134 
4135 #ifdef CONFIG_MMU
4136 static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
4137 					struct cftype *cft, u64 val)
4138 {
4139 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4140 
4141 	pr_warn_once("Cgroup memory moving (move_charge_at_immigrate) is deprecated. "
4142 		     "Please report your usecase to linux-mm@kvack.org if you "
4143 		     "depend on this functionality.\n");
4144 
4145 	if (val & ~MOVE_MASK)
4146 		return -EINVAL;
4147 
4148 	/*
4149 	 * No kind of locking is needed in here, because ->can_attach() will
4150 	 * check this value once at the beginning of the process, and then carry
4151 	 * on with stale data. This means that changes to this value will only
4152 	 * affect task migrations starting after the change.
4153 	 */
4154 	memcg->move_charge_at_immigrate = val;
4155 	return 0;
4156 }
4157 #else
4158 static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
4159 					struct cftype *cft, u64 val)
4160 {
4161 	return -ENOSYS;
4162 }
4163 #endif
4164 
4165 #ifdef CONFIG_NUMA
4166 
4167 #define LRU_ALL_FILE (BIT(LRU_INACTIVE_FILE) | BIT(LRU_ACTIVE_FILE))
4168 #define LRU_ALL_ANON (BIT(LRU_INACTIVE_ANON) | BIT(LRU_ACTIVE_ANON))
4169 #define LRU_ALL	     ((1 << NR_LRU_LISTS) - 1)
4170 
4171 static unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
4172 				int nid, unsigned int lru_mask, bool tree)
4173 {
4174 	struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
4175 	unsigned long nr = 0;
4176 	enum lru_list lru;
4177 
4178 	VM_BUG_ON((unsigned)nid >= nr_node_ids);
4179 
4180 	for_each_lru(lru) {
4181 		if (!(BIT(lru) & lru_mask))
4182 			continue;
4183 		if (tree)
4184 			nr += lruvec_page_state(lruvec, NR_LRU_BASE + lru);
4185 		else
4186 			nr += lruvec_page_state_local(lruvec, NR_LRU_BASE + lru);
4187 	}
4188 	return nr;
4189 }
4190 
4191 static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg,
4192 					     unsigned int lru_mask,
4193 					     bool tree)
4194 {
4195 	unsigned long nr = 0;
4196 	enum lru_list lru;
4197 
4198 	for_each_lru(lru) {
4199 		if (!(BIT(lru) & lru_mask))
4200 			continue;
4201 		if (tree)
4202 			nr += memcg_page_state(memcg, NR_LRU_BASE + lru);
4203 		else
4204 			nr += memcg_page_state_local(memcg, NR_LRU_BASE + lru);
4205 	}
4206 	return nr;
4207 }
4208 
4209 static int memcg_numa_stat_show(struct seq_file *m, void *v)
4210 {
4211 	struct numa_stat {
4212 		const char *name;
4213 		unsigned int lru_mask;
4214 	};
4215 
4216 	static const struct numa_stat stats[] = {
4217 		{ "total", LRU_ALL },
4218 		{ "file", LRU_ALL_FILE },
4219 		{ "anon", LRU_ALL_ANON },
4220 		{ "unevictable", BIT(LRU_UNEVICTABLE) },
4221 	};
4222 	const struct numa_stat *stat;
4223 	int nid;
4224 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
4225 
4226 	mem_cgroup_flush_stats(memcg);
4227 
4228 	for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
4229 		seq_printf(m, "%s=%lu", stat->name,
4230 			   mem_cgroup_nr_lru_pages(memcg, stat->lru_mask,
4231 						   false));
4232 		for_each_node_state(nid, N_MEMORY)
4233 			seq_printf(m, " N%d=%lu", nid,
4234 				   mem_cgroup_node_nr_lru_pages(memcg, nid,
4235 							stat->lru_mask, false));
4236 		seq_putc(m, '\n');
4237 	}
4238 
4239 	for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
4240 
4241 		seq_printf(m, "hierarchical_%s=%lu", stat->name,
4242 			   mem_cgroup_nr_lru_pages(memcg, stat->lru_mask,
4243 						   true));
4244 		for_each_node_state(nid, N_MEMORY)
4245 			seq_printf(m, " N%d=%lu", nid,
4246 				   mem_cgroup_node_nr_lru_pages(memcg, nid,
4247 							stat->lru_mask, true));
4248 		seq_putc(m, '\n');
4249 	}
4250 
4251 	return 0;
4252 }
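/*
 * Sample memory.numa_stat output produced by the two loops above (the
 * numbers are made up):
 *
 *	total=1024 N0=800 N1=224
 *	file=512 N0=400 N1=112
 *	anon=500 N0=390 N1=110
 *	unevictable=12 N0=10 N1=2
 *	hierarchical_total=2048 N0=1500 N1=548
 *	...
 */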
4253 #endif /* CONFIG_NUMA */
4254 
4255 static const unsigned int memcg1_stats[] = {
4256 	NR_FILE_PAGES,
4257 	NR_ANON_MAPPED,
4258 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
4259 	NR_ANON_THPS,
4260 #endif
4261 	NR_SHMEM,
4262 	NR_FILE_MAPPED,
4263 	NR_FILE_DIRTY,
4264 	NR_WRITEBACK,
4265 	WORKINGSET_REFAULT_ANON,
4266 	WORKINGSET_REFAULT_FILE,
4267 #ifdef CONFIG_SWAP
4268 	MEMCG_SWAP,
4269 	NR_SWAPCACHE,
4270 #endif
4271 };
4272 
4273 static const char *const memcg1_stat_names[] = {
4274 	"cache",
4275 	"rss",
4276 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
4277 	"rss_huge",
4278 #endif
4279 	"shmem",
4280 	"mapped_file",
4281 	"dirty",
4282 	"writeback",
4283 	"workingset_refault_anon",
4284 	"workingset_refault_file",
4285 #ifdef CONFIG_SWAP
4286 	"swap",
4287 	"swapcached",
4288 #endif
4289 };
4290 
4291 /* Universal VM events cgroup1 shows, original sort order */
4292 static const unsigned int memcg1_events[] = {
4293 	PGPGIN,
4294 	PGPGOUT,
4295 	PGFAULT,
4296 	PGMAJFAULT,
4297 };
4298 
4299 static void memcg1_stat_format(struct mem_cgroup *memcg, struct seq_buf *s)
4300 {
4301 	unsigned long memory, memsw;
4302 	struct mem_cgroup *mi;
4303 	unsigned int i;
4304 
4305 	BUILD_BUG_ON(ARRAY_SIZE(memcg1_stat_names) != ARRAY_SIZE(memcg1_stats));
4306 
4307 	mem_cgroup_flush_stats(memcg);
4308 
4309 	for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
4310 		unsigned long nr;
4311 
4312 		nr = memcg_page_state_local_output(memcg, memcg1_stats[i]);
4313 		seq_buf_printf(s, "%s %lu\n", memcg1_stat_names[i], nr);
4314 	}
4315 
4316 	for (i = 0; i < ARRAY_SIZE(memcg1_events); i++)
4317 		seq_buf_printf(s, "%s %lu\n", vm_event_name(memcg1_events[i]),
4318 			       memcg_events_local(memcg, memcg1_events[i]));
4319 
4320 	for (i = 0; i < NR_LRU_LISTS; i++)
4321 		seq_buf_printf(s, "%s %lu\n", lru_list_name(i),
4322 			       memcg_page_state_local(memcg, NR_LRU_BASE + i) *
4323 			       PAGE_SIZE);
4324 
4325 	/* Hierarchical information */
4326 	memory = memsw = PAGE_COUNTER_MAX;
4327 	for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) {
4328 		memory = min(memory, READ_ONCE(mi->memory.max));
4329 		memsw = min(memsw, READ_ONCE(mi->memsw.max));
4330 	}
4331 	seq_buf_printf(s, "hierarchical_memory_limit %llu\n",
4332 		       (u64)memory * PAGE_SIZE);
4333 	seq_buf_printf(s, "hierarchical_memsw_limit %llu\n",
4334 		       (u64)memsw * PAGE_SIZE);
4335 
4336 	for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
4337 		unsigned long nr;
4338 
4339 		nr = memcg_page_state_output(memcg, memcg1_stats[i]);
4340 		seq_buf_printf(s, "total_%s %llu\n", memcg1_stat_names[i],
4341 			       (u64)nr);
4342 	}
4343 
4344 	for (i = 0; i < ARRAY_SIZE(memcg1_events); i++)
4345 		seq_buf_printf(s, "total_%s %llu\n",
4346 			       vm_event_name(memcg1_events[i]),
4347 			       (u64)memcg_events(memcg, memcg1_events[i]));
4348 
4349 	for (i = 0; i < NR_LRU_LISTS; i++)
4350 		seq_buf_printf(s, "total_%s %llu\n", lru_list_name(i),
4351 			       (u64)memcg_page_state(memcg, NR_LRU_BASE + i) *
4352 			       PAGE_SIZE);
4353 
4354 #ifdef CONFIG_DEBUG_VM
4355 	{
4356 		pg_data_t *pgdat;
4357 		struct mem_cgroup_per_node *mz;
4358 		unsigned long anon_cost = 0;
4359 		unsigned long file_cost = 0;
4360 
4361 		for_each_online_pgdat(pgdat) {
4362 			mz = memcg->nodeinfo[pgdat->node_id];
4363 
4364 			anon_cost += mz->lruvec.anon_cost;
4365 			file_cost += mz->lruvec.file_cost;
4366 		}
4367 		seq_buf_printf(s, "anon_cost %lu\n", anon_cost);
4368 		seq_buf_printf(s, "file_cost %lu\n", file_cost);
4369 	}
4370 #endif
4371 }
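/*
 * Sample memory.stat output assembled above (values are made up): the
 * local counters, events and per-LRU sizes come first, followed by the
 * hierarchical limits and the "total_" equivalents:
 *
 *	cache 1048576
 *	rss 4194304
 *	...
 *	pgfault 1000
 *	...
 *	hierarchical_memory_limit 536870912
 *	total_cache 2097152
 *	...
 */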
4372 
4373 static u64 mem_cgroup_swappiness_read(struct cgroup_subsys_state *css,
4374 				      struct cftype *cft)
4375 {
4376 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4377 
4378 	return mem_cgroup_swappiness(memcg);
4379 }
4380 
4381 static int mem_cgroup_swappiness_write(struct cgroup_subsys_state *css,
4382 				       struct cftype *cft, u64 val)
4383 {
4384 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4385 
4386 	if (val > 200)
4387 		return -EINVAL;
4388 
4389 	if (!mem_cgroup_is_root(memcg))
4390 		WRITE_ONCE(memcg->swappiness, val);
4391 	else
4392 		WRITE_ONCE(vm_swappiness, val);
4393 
4394 	return 0;
4395 }
4396 
4397 static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
4398 {
4399 	struct mem_cgroup_threshold_ary *t;
4400 	unsigned long usage;
4401 	int i;
4402 
4403 	rcu_read_lock();
4404 	if (!swap)
4405 		t = rcu_dereference(memcg->thresholds.primary);
4406 	else
4407 		t = rcu_dereference(memcg->memsw_thresholds.primary);
4408 
4409 	if (!t)
4410 		goto unlock;
4411 
4412 	usage = mem_cgroup_usage(memcg, swap);
4413 
4414 	/*
4415 	 * current_threshold points to the threshold just below or equal to usage.
4416 	 * If that's not true, a threshold was crossed after the last
4417 	 * call of __mem_cgroup_threshold().
4418 	 */
4419 	i = t->current_threshold;
4420 
4421 	/*
4422 	 * Iterate backward over array of thresholds starting from
4423 	 * current_threshold and check if a threshold is crossed.
4424 	 * If none of the thresholds below usage is crossed, we read
4425 	 * only one element of the array here.
4426 	 */
4427 	for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--)
4428 		eventfd_signal(t->entries[i].eventfd);
4429 
4430 	/* i = current_threshold + 1 */
4431 	i++;
4432 
4433 	/*
4434 	 * Iterate forward over array of thresholds starting from
4435 	 * current_threshold+1 and check if a threshold is crossed.
4436 	 * If none of the thresholds above usage is crossed, we read
4437 	 * only one element of the array here.
4438 	 */
4439 	for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++)
4440 		eventfd_signal(t->entries[i].eventfd);
4441 
4442 	/* Update current_threshold */
4443 	t->current_threshold = i - 1;
4444 unlock:
4445 	rcu_read_unlock();
4446 }
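/*
 * Worked example for the two scans above (thresholds and usage are
 * illustrative): with registered thresholds {4M, 8M, 16M} and a previous
 * usage of 10M, current_threshold is 1 (the 8M entry).  If usage grows to
 * 20M, the backward scan stops immediately (8M is not above 20M), the
 * forward scan signals the 16M eventfd and current_threshold becomes 2.
 * If usage instead drops to 5M, the backward scan signals the 8M eventfd
 * and current_threshold moves back to 0.
 */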
4447 
4448 static void mem_cgroup_threshold(struct mem_cgroup *memcg)
4449 {
4450 	while (memcg) {
4451 		__mem_cgroup_threshold(memcg, false);
4452 		if (do_memsw_account())
4453 			__mem_cgroup_threshold(memcg, true);
4454 
4455 		memcg = parent_mem_cgroup(memcg);
4456 	}
4457 }
4458 
4459 static int compare_thresholds(const void *a, const void *b)
4460 {
4461 	const struct mem_cgroup_threshold *_a = a;
4462 	const struct mem_cgroup_threshold *_b = b;
4463 
4464 	if (_a->threshold > _b->threshold)
4465 		return 1;
4466 
4467 	if (_a->threshold < _b->threshold)
4468 		return -1;
4469 
4470 	return 0;
4471 }
4472 
4473 static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg)
4474 {
4475 	struct mem_cgroup_eventfd_list *ev;
4476 
4477 	spin_lock(&memcg_oom_lock);
4478 
4479 	list_for_each_entry(ev, &memcg->oom_notify, list)
4480 		eventfd_signal(ev->eventfd);
4481 
4482 	spin_unlock(&memcg_oom_lock);
4483 	return 0;
4484 }
4485 
4486 static void mem_cgroup_oom_notify(struct mem_cgroup *memcg)
4487 {
4488 	struct mem_cgroup *iter;
4489 
4490 	for_each_mem_cgroup_tree(iter, memcg)
4491 		mem_cgroup_oom_notify_cb(iter);
4492 }
4493 
4494 static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
4495 	struct eventfd_ctx *eventfd, const char *args, enum res_type type)
4496 {
4497 	struct mem_cgroup_thresholds *thresholds;
4498 	struct mem_cgroup_threshold_ary *new;
4499 	unsigned long threshold;
4500 	unsigned long usage;
4501 	int i, size, ret;
4502 
4503 	ret = page_counter_memparse(args, "-1", &threshold);
4504 	if (ret)
4505 		return ret;
4506 
4507 	mutex_lock(&memcg->thresholds_lock);
4508 
4509 	if (type == _MEM) {
4510 		thresholds = &memcg->thresholds;
4511 		usage = mem_cgroup_usage(memcg, false);
4512 	} else if (type == _MEMSWAP) {
4513 		thresholds = &memcg->memsw_thresholds;
4514 		usage = mem_cgroup_usage(memcg, true);
4515 	} else
4516 		BUG();
4517 
4518 	/* Check if a threshold crossed before adding a new one */
4519 	if (thresholds->primary)
4520 		__mem_cgroup_threshold(memcg, type == _MEMSWAP);
4521 
4522 	size = thresholds->primary ? thresholds->primary->size + 1 : 1;
4523 
4524 	/* Allocate memory for new array of thresholds */
4525 	new = kmalloc(struct_size(new, entries, size), GFP_KERNEL);
4526 	if (!new) {
4527 		ret = -ENOMEM;
4528 		goto unlock;
4529 	}
4530 	new->size = size;
4531 
4532 	/* Copy thresholds (if any) to new array */
4533 	if (thresholds->primary)
4534 		memcpy(new->entries, thresholds->primary->entries,
4535 		       flex_array_size(new, entries, size - 1));
4536 
4537 	/* Add new threshold */
4538 	new->entries[size - 1].eventfd = eventfd;
4539 	new->entries[size - 1].threshold = threshold;
4540 
4541 	/* Sort thresholds. Registering a new threshold isn't time-critical */
4542 	sort(new->entries, size, sizeof(*new->entries),
4543 			compare_thresholds, NULL);
4544 
4545 	/* Find current threshold */
4546 	new->current_threshold = -1;
4547 	for (i = 0; i < size; i++) {
4548 		if (new->entries[i].threshold <= usage) {
4549 			/*
4550 			 * new->current_threshold will not be used until
4551 			 * rcu_assign_pointer(), so it's safe to increment
4552 			 * it here.
4553 			 */
4554 			++new->current_threshold;
4555 		} else
4556 			break;
4557 	}
4558 
4559 	/* Free old spare buffer and save old primary buffer as spare */
4560 	kfree(thresholds->spare);
4561 	thresholds->spare = thresholds->primary;
4562 
4563 	rcu_assign_pointer(thresholds->primary, new);
4564 
4565 	/* To be sure that nobody uses thresholds */
4566 	synchronize_rcu();
4567 
4568 unlock:
4569 	mutex_unlock(&memcg->thresholds_lock);
4570 
4571 	return ret;
4572 }
4573 
4574 static int mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
4575 	struct eventfd_ctx *eventfd, const char *args)
4576 {
4577 	return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEM);
4578 }
4579 
4580 static int memsw_cgroup_usage_register_event(struct mem_cgroup *memcg,
4581 	struct eventfd_ctx *eventfd, const char *args)
4582 {
4583 	return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEMSWAP);
4584 }
4585 
4586 static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
4587 	struct eventfd_ctx *eventfd, enum res_type type)
4588 {
4589 	struct mem_cgroup_thresholds *thresholds;
4590 	struct mem_cgroup_threshold_ary *new;
4591 	unsigned long usage;
4592 	int i, j, size, entries;
4593 
4594 	mutex_lock(&memcg->thresholds_lock);
4595 
4596 	if (type == _MEM) {
4597 		thresholds = &memcg->thresholds;
4598 		usage = mem_cgroup_usage(memcg, false);
4599 	} else if (type == _MEMSWAP) {
4600 		thresholds = &memcg->memsw_thresholds;
4601 		usage = mem_cgroup_usage(memcg, true);
4602 	} else
4603 		BUG();
4604 
4605 	if (!thresholds->primary)
4606 		goto unlock;
4607 
4608 	/* Check if a threshold crossed before removing */
4609 	__mem_cgroup_threshold(memcg, type == _MEMSWAP);
4610 
4611 	/* Calculate new number of threshold */
4612 	/* Calculate the new number of thresholds */
4613 	for (i = 0; i < thresholds->primary->size; i++) {
4614 		if (thresholds->primary->entries[i].eventfd != eventfd)
4615 			size++;
4616 		else
4617 			entries++;
4618 	}
4619 
4620 	new = thresholds->spare;
4621 
4622 	/* If no items related to eventfd have been cleared, nothing to do */
4623 	if (!entries)
4624 		goto unlock;
4625 
4626 	/* Set thresholds array to NULL if we don't have thresholds */
4627 	if (!size) {
4628 		kfree(new);
4629 		new = NULL;
4630 		goto swap_buffers;
4631 	}
4632 
4633 	new->size = size;
4634 
4635 	/* Copy thresholds and find current threshold */
4636 	new->current_threshold = -1;
4637 	for (i = 0, j = 0; i < thresholds->primary->size; i++) {
4638 		if (thresholds->primary->entries[i].eventfd == eventfd)
4639 			continue;
4640 
4641 		new->entries[j] = thresholds->primary->entries[i];
4642 		if (new->entries[j].threshold <= usage) {
4643 			/*
4644 			 * new->current_threshold will not be used
4645 			 * until rcu_assign_pointer(), so it's safe to increment
4646 			 * it here.
4647 			 */
4648 			++new->current_threshold;
4649 		}
4650 		j++;
4651 	}
4652 
4653 swap_buffers:
4654 	/* Swap primary and spare array */
4655 	thresholds->spare = thresholds->primary;
4656 
4657 	rcu_assign_pointer(thresholds->primary, new);
4658 
4659 	/* To be sure that nobody uses thresholds */
4660 	synchronize_rcu();
4661 
4662 	/* If all events are unregistered, free the spare array */
4663 	if (!new) {
4664 		kfree(thresholds->spare);
4665 		thresholds->spare = NULL;
4666 	}
4667 unlock:
4668 	mutex_unlock(&memcg->thresholds_lock);
4669 }
4670 
4671 static void mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
4672 	struct eventfd_ctx *eventfd)
4673 {
4674 	return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEM);
4675 }
4676 
4677 static void memsw_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
4678 	struct eventfd_ctx *eventfd)
4679 {
4680 	return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEMSWAP);
4681 }
4682 
4683 static int mem_cgroup_oom_register_event(struct mem_cgroup *memcg,
4684 	struct eventfd_ctx *eventfd, const char *args)
4685 {
4686 	struct mem_cgroup_eventfd_list *event;
4687 
4688 	event = kmalloc(sizeof(*event),	GFP_KERNEL);
4689 	if (!event)
4690 		return -ENOMEM;
4691 
4692 	spin_lock(&memcg_oom_lock);
4693 
4694 	event->eventfd = eventfd;
4695 	list_add(&event->list, &memcg->oom_notify);
4696 
4697 	/* already in OOM ? */
4698 	if (memcg->under_oom)
4699 		eventfd_signal(eventfd);
4700 	spin_unlock(&memcg_oom_lock);
4701 
4702 	return 0;
4703 }
4704 
4705 static void mem_cgroup_oom_unregister_event(struct mem_cgroup *memcg,
4706 	struct eventfd_ctx *eventfd)
4707 {
4708 	struct mem_cgroup_eventfd_list *ev, *tmp;
4709 
4710 	spin_lock(&memcg_oom_lock);
4711 
4712 	list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) {
4713 		if (ev->eventfd == eventfd) {
4714 			list_del(&ev->list);
4715 			kfree(ev);
4716 		}
4717 	}
4718 
4719 	spin_unlock(&memcg_oom_lock);
4720 }
4721 
4722 static int mem_cgroup_oom_control_read(struct seq_file *sf, void *v)
4723 {
4724 	struct mem_cgroup *memcg = mem_cgroup_from_seq(sf);
4725 
4726 	seq_printf(sf, "oom_kill_disable %d\n", READ_ONCE(memcg->oom_kill_disable));
4727 	seq_printf(sf, "under_oom %d\n", (bool)memcg->under_oom);
4728 	seq_printf(sf, "oom_kill %lu\n",
4729 		   atomic_long_read(&memcg->memory_events[MEMCG_OOM_KILL]));
4730 	return 0;
4731 }
4732 
4733 static int mem_cgroup_oom_control_write(struct cgroup_subsys_state *css,
4734 	struct cftype *cft, u64 val)
4735 {
4736 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4737 
4738 	/* cannot set to root cgroup and only 0 and 1 are allowed */
4739 	if (mem_cgroup_is_root(memcg) || !((val == 0) || (val == 1)))
4740 		return -EINVAL;
4741 
4742 	WRITE_ONCE(memcg->oom_kill_disable, val);
4743 	if (!val)
4744 		memcg_oom_recover(memcg);
4745 
4746 	return 0;
4747 }
4748 
4749 #ifdef CONFIG_CGROUP_WRITEBACK
4750 
4751 #include <trace/events/writeback.h>
4752 
4753 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
4754 {
4755 	return wb_domain_init(&memcg->cgwb_domain, gfp);
4756 }
4757 
4758 static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
4759 {
4760 	wb_domain_exit(&memcg->cgwb_domain);
4761 }
4762 
4763 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
4764 {
4765 	wb_domain_size_changed(&memcg->cgwb_domain);
4766 }
4767 
4768 struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
4769 {
4770 	struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
4771 
4772 	if (!memcg->css.parent)
4773 		return NULL;
4774 
4775 	return &memcg->cgwb_domain;
4776 }
4777 
4778 /**
4779  * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg
4780  * @wb: bdi_writeback in question
4781  * @pfilepages: out parameter for number of file pages
4782  * @pheadroom: out parameter for number of allocatable pages according to memcg
4783  * @pdirty: out parameter for number of dirty pages
4784  * @pwriteback: out parameter for number of pages under writeback
4785  *
4786  * Determine the numbers of file, headroom, dirty, and writeback pages in
4787  * @wb's memcg.  File, dirty and writeback are self-explanatory.  Headroom
4788  * is a bit more involved.
4789  *
4790  * A memcg's headroom is "min(max, high) - used".  In the hierarchy, the
4791  * headroom is calculated as the lowest headroom of itself and the
4792  * ancestors.  Note that this doesn't consider the actual amount of
4793  * available memory in the system.  The caller should further cap
4794  * *@pheadroom accordingly.
4795  */
4796 void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
4797 			 unsigned long *pheadroom, unsigned long *pdirty,
4798 			 unsigned long *pwriteback)
4799 {
4800 	struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
4801 	struct mem_cgroup *parent;
4802 
4803 	mem_cgroup_flush_stats(memcg);
4804 
4805 	*pdirty = memcg_page_state(memcg, NR_FILE_DIRTY);
4806 	*pwriteback = memcg_page_state(memcg, NR_WRITEBACK);
4807 	*pfilepages = memcg_page_state(memcg, NR_INACTIVE_FILE) +
4808 			memcg_page_state(memcg, NR_ACTIVE_FILE);
4809 
4810 	*pheadroom = PAGE_COUNTER_MAX;
4811 	while ((parent = parent_mem_cgroup(memcg))) {
4812 		unsigned long ceiling = min(READ_ONCE(memcg->memory.max),
4813 					    READ_ONCE(memcg->memory.high));
4814 		unsigned long used = page_counter_read(&memcg->memory);
4815 
4816 		*pheadroom = min(*pheadroom, ceiling - min(ceiling, used));
4817 		memcg = parent;
4818 	}
4819 }
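/*
 * Worked example for the headroom walk above (sizes are illustrative):
 * a memcg with high = 512M and 300M in use, under a non-root parent with
 * max = 1G and 900M in use, yields 212M on the first iteration and 100M
 * on the second, so *pheadroom ends up at 100M: the most constrained
 * ancestor wins.  Levels without limits contribute a ceiling of
 * PAGE_COUNTER_MAX and leave the headroom effectively untouched.
 */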
4820 
4821 /*
4822  * Foreign dirty flushing
4823  *
4824  * There's an inherent mismatch between memcg and writeback.  The former
4825  * tracks ownership per-page while the latter per-inode.  This was a
4826  * deliberate design decision because honoring per-page ownership in the
4827  * writeback path is complicated, may lead to higher CPU and IO overheads
4828  * and deemed unnecessary given that write-sharing an inode across
4829  * different cgroups isn't a common use-case.
4830  *
4831  * Combined with inode majority-writer ownership switching, this works well
4832  * enough in most cases but there are some pathological cases.  For
4833  * example, let's say there are two cgroups A and B which keep writing to
4834  * different but confined parts of the same inode.  B owns the inode and
4835  * A's memory is limited far below B's.  A's dirty ratio can rise enough to
4836  * trigger balance_dirty_pages() sleeps but B's can be low enough to avoid
4837  * triggering background writeback.  A will be slowed down without a way to
4838  * make writeback of the dirty pages happen.
4839  *
4840  * Conditions like the above can lead to a cgroup getting repeatedly and
4841  * severely throttled after making some progress after each
4842  * dirty_expire_interval while the underlying IO device is almost
4843  * completely idle.
4844  *
4845  * Solving this problem completely requires matching the ownership tracking
4846  * granularities between memcg and writeback in either direction.  However,
4847  * the more egregious behaviors can be avoided by simply remembering the
4848  * most recent foreign dirtying events and initiating remote flushes on
4849  * them when local writeback isn't enough to keep the memory clean enough.
4850  *
4851  * The following two functions implement such mechanism.  When a foreign
4852  * page - a page whose memcg and writeback ownerships don't match - is
4853  * dirtied, mem_cgroup_track_foreign_dirty() records the inode owning
4854  * bdi_writeback on the page owning memcg.  When balance_dirty_pages()
4855  * decides that the memcg needs to sleep due to high dirty ratio, it calls
4856  * mem_cgroup_flush_foreign() which queues writeback on the recorded
4857  * foreign bdi_writebacks which haven't expired.  Both the numbers of
4858  * recorded bdi_writebacks and concurrent in-flight foreign writebacks are
4859  * limited to MEMCG_CGWB_FRN_CNT.
4860  *
4861  * The mechanism only remembers IDs and doesn't hold any object references.
4862  * As being wrong occasionally doesn't matter, updates and accesses to the
4863  * records are lockless and racy.
4864  */
4865 void mem_cgroup_track_foreign_dirty_slowpath(struct folio *folio,
4866 					     struct bdi_writeback *wb)
4867 {
4868 	struct mem_cgroup *memcg = folio_memcg(folio);
4869 	struct memcg_cgwb_frn *frn;
4870 	u64 now = get_jiffies_64();
4871 	u64 oldest_at = now;
4872 	int oldest = -1;
4873 	int i;
4874 
4875 	trace_track_foreign_dirty(folio, wb);
4876 
4877 	/*
4878 	 * Pick the slot to use.  If there is already a slot for @wb, keep
4879 	 * using it.  If not replace the oldest one which isn't being
4880 	 * using it.  If not, replace the oldest one which isn't being
4881 	 */
4882 	for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) {
4883 		frn = &memcg->cgwb_frn[i];
4884 		if (frn->bdi_id == wb->bdi->id &&
4885 		    frn->memcg_id == wb->memcg_css->id)
4886 			break;
4887 		if (time_before64(frn->at, oldest_at) &&
4888 		    atomic_read(&frn->done.cnt) == 1) {
4889 			oldest = i;
4890 			oldest_at = frn->at;
4891 		}
4892 	}
4893 
4894 	if (i < MEMCG_CGWB_FRN_CNT) {
4895 		/*
4896 		 * Re-using an existing one.  Update timestamp lazily to
4897 		 * avoid making the cacheline hot.  We want them to be
4898 		 * reasonably up-to-date and significantly shorter than
4899 		 * dirty_expire_interval as that's what expires the record.
4900 		 * Use the shorter of 1s and dirty_expire_interval / 8.
4901 		 */
4902 		unsigned long update_intv =
4903 			min_t(unsigned long, HZ,
4904 			      msecs_to_jiffies(dirty_expire_interval * 10) / 8);
4905 
4906 		if (time_before64(frn->at, now - update_intv))
4907 			frn->at = now;
4908 	} else if (oldest >= 0) {
4909 		/* replace the oldest free one */
4910 		frn = &memcg->cgwb_frn[oldest];
4911 		frn->bdi_id = wb->bdi->id;
4912 		frn->memcg_id = wb->memcg_css->id;
4913 		frn->at = now;
4914 	}
4915 }
4916 
4917 /* issue foreign writeback flushes for recorded foreign dirtying events */
4918 void mem_cgroup_flush_foreign(struct bdi_writeback *wb)
4919 {
4920 	struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
4921 	unsigned long intv = msecs_to_jiffies(dirty_expire_interval * 10);
4922 	u64 now = jiffies_64;
4923 	int i;
4924 
4925 	for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) {
4926 		struct memcg_cgwb_frn *frn = &memcg->cgwb_frn[i];
4927 
4928 		/*
4929 		 * If the record is older than dirty_expire_interval,
4930 		 * writeback on it has already started.  No need to kick it
4931 		 * off again.  Also, don't start a new one if there's
4932 		 * already one in flight.
4933 		 */
4934 		if (time_after64(frn->at, now - intv) &&
4935 		    atomic_read(&frn->done.cnt) == 1) {
4936 			frn->at = 0;
4937 			trace_flush_foreign(wb, frn->bdi_id, frn->memcg_id);
4938 			cgroup_writeback_by_id(frn->bdi_id, frn->memcg_id,
4939 					       WB_REASON_FOREIGN_FLUSH,
4940 					       &frn->done);
4941 		}
4942 	}
4943 }
4944 
4945 #else	/* CONFIG_CGROUP_WRITEBACK */
4946 
4947 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
4948 {
4949 	return 0;
4950 }
4951 
4952 static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
4953 {
4954 }
4955 
4956 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
4957 {
4958 }
4959 
4960 #endif	/* CONFIG_CGROUP_WRITEBACK */
4961 
4962 /*
4963  * DO NOT USE IN NEW FILES.
4964  *
4965  * "cgroup.event_control" implementation.
4966  *
4967  * This is way over-engineered.  It tries to support fully configurable
4968  * events for each user.  Such level of flexibility is completely
4969  * events for each user.  Such a level of flexibility is completely
4970  * unnecessary, especially in light of the planned unified hierarchy.
4971  * Please deprecate this and replace with something simpler if at all
4972  * possible.
4973  */
4974 
4975 /*
4976  * Unregister event and free resources.
4977  *
4978  * Gets called from workqueue.
4979  */
4980 static void memcg_event_remove(struct work_struct *work)
4981 {
4982 	struct mem_cgroup_event *event =
4983 		container_of(work, struct mem_cgroup_event, remove);
4984 	struct mem_cgroup *memcg = event->memcg;
4985 
4986 	remove_wait_queue(event->wqh, &event->wait);
4987 
4988 	event->unregister_event(memcg, event->eventfd);
4989 
4990 	/* Notify userspace the event is going away. */
4991 	eventfd_signal(event->eventfd);
4992 
4993 	eventfd_ctx_put(event->eventfd);
4994 	kfree(event);
4995 	css_put(&memcg->css);
4996 }
4997 
4998 /*
4999  * Gets called on EPOLLHUP on eventfd when user closes it.
5000  *
5001  * Called with wqh->lock held and interrupts disabled.
5002  */
5003 static int memcg_event_wake(wait_queue_entry_t *wait, unsigned mode,
5004 			    int sync, void *key)
5005 {
5006 	struct mem_cgroup_event *event =
5007 		container_of(wait, struct mem_cgroup_event, wait);
5008 	struct mem_cgroup *memcg = event->memcg;
5009 	__poll_t flags = key_to_poll(key);
5010 
5011 	if (flags & EPOLLHUP) {
5012 		/*
5013 		 * If the event has been detached at cgroup removal, we
5014 		 * can simply return knowing the other side will clean up
5015 		 * for us.
5016 		 *
5017 		 * We can't race against event freeing since the other
5018 		 * side will require wqh->lock via remove_wait_queue(),
5019 		 * which we hold.
5020 		 */
5021 		spin_lock(&memcg->event_list_lock);
5022 		if (!list_empty(&event->list)) {
5023 			list_del_init(&event->list);
5024 			/*
5025 			 * We are in atomic context, but memcg_event_remove()
5026 			 * may sleep, so we have to call it from a workqueue.
5027 			 */
5028 			schedule_work(&event->remove);
5029 		}
5030 		spin_unlock(&memcg->event_list_lock);
5031 	}
5032 
5033 	return 0;
5034 }
5035 
5036 static void memcg_event_ptable_queue_proc(struct file *file,
5037 		wait_queue_head_t *wqh, poll_table *pt)
5038 {
5039 	struct mem_cgroup_event *event =
5040 		container_of(pt, struct mem_cgroup_event, pt);
5041 
5042 	event->wqh = wqh;
5043 	add_wait_queue(wqh, &event->wait);
5044 }
5045 
5046 /*
5047  * DO NOT USE IN NEW FILES.
5048  *
5049  * Parse input and register new cgroup event handler.
5050  *
5051  * Input must be in format '<event_fd> <control_fd> <args>'.
5052  * Interpretation of args is defined by control file implementation.
5053  */
5054 static ssize_t memcg_write_event_control(struct kernfs_open_file *of,
5055 					 char *buf, size_t nbytes, loff_t off)
5056 {
5057 	struct cgroup_subsys_state *css = of_css(of);
5058 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5059 	struct mem_cgroup_event *event;
5060 	struct cgroup_subsys_state *cfile_css;
5061 	unsigned int efd, cfd;
5062 	struct fd efile;
5063 	struct fd cfile;
5064 	struct dentry *cdentry;
5065 	const char *name;
5066 	char *endp;
5067 	int ret;
5068 
5069 	if (IS_ENABLED(CONFIG_PREEMPT_RT))
5070 		return -EOPNOTSUPP;
5071 
5072 	buf = strstrip(buf);
5073 
5074 	efd = simple_strtoul(buf, &endp, 10);
5075 	if (*endp != ' ')
5076 		return -EINVAL;
5077 	buf = endp + 1;
5078 
5079 	cfd = simple_strtoul(buf, &endp, 10);
5080 	if ((*endp != ' ') && (*endp != '\0'))
5081 		return -EINVAL;
5082 	buf = endp + 1;
5083 
5084 	event = kzalloc(sizeof(*event), GFP_KERNEL);
5085 	if (!event)
5086 		return -ENOMEM;
5087 
5088 	event->memcg = memcg;
5089 	INIT_LIST_HEAD(&event->list);
5090 	init_poll_funcptr(&event->pt, memcg_event_ptable_queue_proc);
5091 	init_waitqueue_func_entry(&event->wait, memcg_event_wake);
5092 	INIT_WORK(&event->remove, memcg_event_remove);
5093 
5094 	efile = fdget(efd);
5095 	if (!efile.file) {
5096 		ret = -EBADF;
5097 		goto out_kfree;
5098 	}
5099 
5100 	event->eventfd = eventfd_ctx_fileget(efile.file);
5101 	if (IS_ERR(event->eventfd)) {
5102 		ret = PTR_ERR(event->eventfd);
5103 		goto out_put_efile;
5104 	}
5105 
5106 	cfile = fdget(cfd);
5107 	if (!cfile.file) {
5108 		ret = -EBADF;
5109 		goto out_put_eventfd;
5110 	}
5111 
5112 	/* the process need read permission on control file */
5113 	/* the process needs read permission on the control file */
5114 	ret = file_permission(cfile.file, MAY_READ);
5115 	if (ret < 0)
5116 		goto out_put_cfile;
5117 
5118 	/*
5119 	 * The control file must be a regular cgroup1 file. As a regular cgroup
5120 	 * file can't be renamed, it's safe to access its name afterwards.
5121 	 */
5122 	cdentry = cfile.file->f_path.dentry;
5123 	if (cdentry->d_sb->s_type != &cgroup_fs_type || !d_is_reg(cdentry)) {
5124 		ret = -EINVAL;
5125 		goto out_put_cfile;
5126 	}
5127 
5128 	/*
5129 	 * Determine the event callbacks and set them in @event.  This used
5130 	 * to be done via struct cftype but cgroup core no longer knows
5131 	 * about these events.  The following is crude but the whole thing
5132 	 * is for compatibility anyway.
5133 	 *
5134 	 * DO NOT ADD NEW FILES.
5135 	 */
5136 	name = cdentry->d_name.name;
5137 
5138 	if (!strcmp(name, "memory.usage_in_bytes")) {
5139 		event->register_event = mem_cgroup_usage_register_event;
5140 		event->unregister_event = mem_cgroup_usage_unregister_event;
5141 	} else if (!strcmp(name, "memory.oom_control")) {
5142 		event->register_event = mem_cgroup_oom_register_event;
5143 		event->unregister_event = mem_cgroup_oom_unregister_event;
5144 	} else if (!strcmp(name, "memory.pressure_level")) {
5145 		event->register_event = vmpressure_register_event;
5146 		event->unregister_event = vmpressure_unregister_event;
5147 	} else if (!strcmp(name, "memory.memsw.usage_in_bytes")) {
5148 		event->register_event = memsw_cgroup_usage_register_event;
5149 		event->unregister_event = memsw_cgroup_usage_unregister_event;
5150 	} else {
5151 		ret = -EINVAL;
5152 		goto out_put_cfile;
5153 	}
5154 
5155 	/*
5156 	 * Verify that @cfile belongs to @css.  Also, remaining events are
5157 	 * automatically removed on cgroup destruction but the removal is
5158 	 * asynchronous, so take an extra ref on @css.
5159 	 */
5160 	cfile_css = css_tryget_online_from_dir(cdentry->d_parent,
5161 					       &memory_cgrp_subsys);
5162 	ret = -EINVAL;
5163 	if (IS_ERR(cfile_css))
5164 		goto out_put_cfile;
5165 	if (cfile_css != css) {
5166 		css_put(cfile_css);
5167 		goto out_put_cfile;
5168 	}
5169 
5170 	ret = event->register_event(memcg, event->eventfd, buf);
5171 	if (ret)
5172 		goto out_put_css;
5173 
5174 	vfs_poll(efile.file, &event->pt);
5175 
5176 	spin_lock_irq(&memcg->event_list_lock);
5177 	list_add(&event->list, &memcg->event_list);
5178 	spin_unlock_irq(&memcg->event_list_lock);
5179 
5180 	fdput(cfile);
5181 	fdput(efile);
5182 
5183 	return nbytes;
5184 
5185 out_put_css:
5186 	css_put(css);
5187 out_put_cfile:
5188 	fdput(cfile);
5189 out_put_eventfd:
5190 	eventfd_ctx_put(event->eventfd);
5191 out_put_efile:
5192 	fdput(efile);
5193 out_kfree:
5194 	kfree(event);
5195 
5196 	return ret;
5197 }
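/*
 * Userspace registration sketch for the parser above (cgroup v1 only;
 * the descriptors and the 256M threshold are illustrative):
 *
 *	uint64_t cnt;
 *	int efd = eventfd(0, 0);
 *	int cfd = open("memory.usage_in_bytes", O_RDONLY);
 *	int ctl = open("cgroup.event_control", O_WRONLY);
 *	dprintf(ctl, "%d %d 268435456", efd, cfd);
 *	read(efd, &cnt, sizeof(cnt));
 *
 * The read() completes once usage crosses the 256M threshold.  The same
 * "<event_fd> <control_fd> <args>" format drives the oom_control,
 * pressure_level and memsw usage events handled above.
 */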
5198 
5199 #if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_SLUB_DEBUG)
5200 static int mem_cgroup_slab_show(struct seq_file *m, void *p)
5201 {
5202 	/*
5203 	 * Deprecated.
5204 	 * Please, take a look at tools/cgroup/memcg_slabinfo.py .
5205 	 */
5206 	return 0;
5207 }
5208 #endif
5209 
5210 static int memory_stat_show(struct seq_file *m, void *v);
5211 
5212 static struct cftype mem_cgroup_legacy_files[] = {
5213 	{
5214 		.name = "usage_in_bytes",
5215 		.private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
5216 		.read_u64 = mem_cgroup_read_u64,
5217 	},
5218 	{
5219 		.name = "max_usage_in_bytes",
5220 		.private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
5221 		.write = mem_cgroup_reset,
5222 		.read_u64 = mem_cgroup_read_u64,
5223 	},
5224 	{
5225 		.name = "limit_in_bytes",
5226 		.private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
5227 		.write = mem_cgroup_write,
5228 		.read_u64 = mem_cgroup_read_u64,
5229 	},
5230 	{
5231 		.name = "soft_limit_in_bytes",
5232 		.private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT),
5233 		.write = mem_cgroup_write,
5234 		.read_u64 = mem_cgroup_read_u64,
5235 	},
5236 	{
5237 		.name = "failcnt",
5238 		.private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
5239 		.write = mem_cgroup_reset,
5240 		.read_u64 = mem_cgroup_read_u64,
5241 	},
5242 	{
5243 		.name = "stat",
5244 		.seq_show = memory_stat_show,
5245 	},
5246 	{
5247 		.name = "force_empty",
5248 		.write = mem_cgroup_force_empty_write,
5249 	},
5250 	{
5251 		.name = "use_hierarchy",
5252 		.write_u64 = mem_cgroup_hierarchy_write,
5253 		.read_u64 = mem_cgroup_hierarchy_read,
5254 	},
5255 	{
5256 		.name = "cgroup.event_control",		/* XXX: for compat */
5257 		.write = memcg_write_event_control,
5258 		.flags = CFTYPE_NO_PREFIX | CFTYPE_WORLD_WRITABLE,
5259 	},
5260 	{
5261 		.name = "swappiness",
5262 		.read_u64 = mem_cgroup_swappiness_read,
5263 		.write_u64 = mem_cgroup_swappiness_write,
5264 	},
5265 	{
5266 		.name = "move_charge_at_immigrate",
5267 		.read_u64 = mem_cgroup_move_charge_read,
5268 		.write_u64 = mem_cgroup_move_charge_write,
5269 	},
5270 	{
5271 		.name = "oom_control",
5272 		.seq_show = mem_cgroup_oom_control_read,
5273 		.write_u64 = mem_cgroup_oom_control_write,
5274 	},
5275 	{
5276 		.name = "pressure_level",
5277 		.seq_show = mem_cgroup_dummy_seq_show,
5278 	},
5279 #ifdef CONFIG_NUMA
5280 	{
5281 		.name = "numa_stat",
5282 		.seq_show = memcg_numa_stat_show,
5283 	},
5284 #endif
5285 	{
5286 		.name = "kmem.limit_in_bytes",
5287 		.private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT),
5288 		.write = mem_cgroup_write,
5289 		.read_u64 = mem_cgroup_read_u64,
5290 	},
5291 	{
5292 		.name = "kmem.usage_in_bytes",
5293 		.private = MEMFILE_PRIVATE(_KMEM, RES_USAGE),
5294 		.read_u64 = mem_cgroup_read_u64,
5295 	},
5296 	{
5297 		.name = "kmem.failcnt",
5298 		.private = MEMFILE_PRIVATE(_KMEM, RES_FAILCNT),
5299 		.write = mem_cgroup_reset,
5300 		.read_u64 = mem_cgroup_read_u64,
5301 	},
5302 	{
5303 		.name = "kmem.max_usage_in_bytes",
5304 		.private = MEMFILE_PRIVATE(_KMEM, RES_MAX_USAGE),
5305 		.write = mem_cgroup_reset,
5306 		.read_u64 = mem_cgroup_read_u64,
5307 	},
5308 #if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_SLUB_DEBUG)
5309 	{
5310 		.name = "kmem.slabinfo",
5311 		.seq_show = mem_cgroup_slab_show,
5312 	},
5313 #endif
5314 	{
5315 		.name = "kmem.tcp.limit_in_bytes",
5316 		.private = MEMFILE_PRIVATE(_TCP, RES_LIMIT),
5317 		.write = mem_cgroup_write,
5318 		.read_u64 = mem_cgroup_read_u64,
5319 	},
5320 	{
5321 		.name = "kmem.tcp.usage_in_bytes",
5322 		.private = MEMFILE_PRIVATE(_TCP, RES_USAGE),
5323 		.read_u64 = mem_cgroup_read_u64,
5324 	},
5325 	{
5326 		.name = "kmem.tcp.failcnt",
5327 		.private = MEMFILE_PRIVATE(_TCP, RES_FAILCNT),
5328 		.write = mem_cgroup_reset,
5329 		.read_u64 = mem_cgroup_read_u64,
5330 	},
5331 	{
5332 		.name = "kmem.tcp.max_usage_in_bytes",
5333 		.private = MEMFILE_PRIVATE(_TCP, RES_MAX_USAGE),
5334 		.write = mem_cgroup_reset,
5335 		.read_u64 = mem_cgroup_read_u64,
5336 	},
5337 	{ },	/* terminate */
5338 };
5339 
5340 /*
5341  * Private memory cgroup IDR
5342  *
5343  * Swap-out records and page cache shadow entries need to store memcg
5344  * references in constrained space, so we maintain an ID space that is
5345  * limited to 16 bits (MEM_CGROUP_ID_MAX), limiting the total number of
5346  * memory-controlled cgroups to 64k.
5347  *
5348  * However, there usually are many references to the offline CSS after
5349  * the cgroup has been destroyed, such as page cache or reclaimable
5350  * slab objects, that don't need to hang on to the ID. We want to keep
5351  * those dead CSS from occupying IDs, or we might quickly exhaust the
5352  * relatively small ID space and prevent the creation of new cgroups
5353  * even when there are much fewer than 64k cgroups - possibly none.
5354  *
5355  * Maintain a private 16-bit ID space for memcg, and allow the ID to
5356  * be freed and recycled when it's no longer needed, which is usually
5357  * when the CSS is offlined.
5358  *
5359  * The only exception to that are records of swapped out tmpfs/shmem
5360  * pages that need to be attributed to live ancestors on swapin. But
5361  * those references are manageable from userspace.
5362  */
5363 
5364 #define MEM_CGROUP_ID_MAX	((1UL << MEM_CGROUP_ID_SHIFT) - 1)
5365 static DEFINE_IDR(mem_cgroup_idr);
5366 
5367 static void mem_cgroup_id_remove(struct mem_cgroup *memcg)
5368 {
5369 	if (memcg->id.id > 0) {
5370 		idr_remove(&mem_cgroup_idr, memcg->id.id);
5371 		memcg->id.id = 0;
5372 	}
5373 }
5374 
5375 static void __maybe_unused mem_cgroup_id_get_many(struct mem_cgroup *memcg,
5376 						  unsigned int n)
5377 {
5378 	refcount_add(n, &memcg->id.ref);
5379 }
5380 
5381 static void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n)
5382 {
5383 	if (refcount_sub_and_test(n, &memcg->id.ref)) {
5384 		mem_cgroup_id_remove(memcg);
5385 
5386 		/* Memcg ID pins CSS */
5387 		css_put(&memcg->css);
5388 	}
5389 }
5390 
5391 static inline void mem_cgroup_id_put(struct mem_cgroup *memcg)
5392 {
5393 	mem_cgroup_id_put_many(memcg, 1);
5394 }
5395 
5396 /**
5397  * mem_cgroup_from_id - look up a memcg from a memcg id
5398  * @id: the memcg id to look up
5399  *
5400  * Caller must hold rcu_read_lock().
5401  */
5402 struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
5403 {
5404 	WARN_ON_ONCE(!rcu_read_lock_held());
5405 	return idr_find(&mem_cgroup_idr, id);
5406 }
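/*
 * Usage sketch (illustrative): resolving a recorded id back to a usable
 * memcg reference requires RCU protection plus a tryget before the
 * pointer escapes the read-side section:
 *
 *	rcu_read_lock();
 *	memcg = mem_cgroup_from_id(id);
 *	if (memcg && !css_tryget_online(&memcg->css))
 *		memcg = NULL;
 *	rcu_read_unlock();
 */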
5407 
5408 #ifdef CONFIG_SHRINKER_DEBUG
5409 struct mem_cgroup *mem_cgroup_get_from_ino(unsigned long ino)
5410 {
5411 	struct cgroup *cgrp;
5412 	struct cgroup_subsys_state *css;
5413 	struct mem_cgroup *memcg;
5414 
5415 	cgrp = cgroup_get_from_id(ino);
5416 	if (IS_ERR(cgrp))
5417 		return ERR_CAST(cgrp);
5418 
5419 	css = cgroup_get_e_css(cgrp, &memory_cgrp_subsys);
5420 	if (css)
5421 		memcg = container_of(css, struct mem_cgroup, css);
5422 	else
5423 		memcg = ERR_PTR(-ENOENT);
5424 
5425 	cgroup_put(cgrp);
5426 
5427 	return memcg;
5428 }
5429 #endif
5430 
5431 static int alloc_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
5432 {
5433 	struct mem_cgroup_per_node *pn;
5434 
5435 	pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, node);
5436 	if (!pn)
5437 		return 1;
5438 
5439 	pn->lruvec_stats_percpu = alloc_percpu_gfp(struct lruvec_stats_percpu,
5440 						   GFP_KERNEL_ACCOUNT);
5441 	if (!pn->lruvec_stats_percpu) {
5442 		kfree(pn);
5443 		return 1;
5444 	}
5445 
5446 	lruvec_init(&pn->lruvec);
5447 	pn->memcg = memcg;
5448 
5449 	memcg->nodeinfo[node] = pn;
5450 	return 0;
5451 }
5452 
5453 static void free_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
5454 {
5455 	struct mem_cgroup_per_node *pn = memcg->nodeinfo[node];
5456 
5457 	if (!pn)
5458 		return;
5459 
5460 	free_percpu(pn->lruvec_stats_percpu);
5461 	kfree(pn);
5462 }
5463 
5464 static void __mem_cgroup_free(struct mem_cgroup *memcg)
5465 {
5466 	int node;
5467 
5468 	if (memcg->orig_objcg)
5469 		obj_cgroup_put(memcg->orig_objcg);
5470 
5471 	for_each_node(node)
5472 		free_mem_cgroup_per_node_info(memcg, node);
5473 	kfree(memcg->vmstats);
5474 	free_percpu(memcg->vmstats_percpu);
5475 	kfree(memcg);
5476 }
5477 
5478 static void mem_cgroup_free(struct mem_cgroup *memcg)
5479 {
5480 	lru_gen_exit_memcg(memcg);
5481 	memcg_wb_domain_exit(memcg);
5482 	__mem_cgroup_free(memcg);
5483 }
5484 
5485 static struct mem_cgroup *mem_cgroup_alloc(struct mem_cgroup *parent)
5486 {
5487 	struct memcg_vmstats_percpu *statc, *pstatc;
5488 	struct mem_cgroup *memcg;
5489 	int node, cpu;
5490 	int __maybe_unused i;
5491 	long error = -ENOMEM;
5492 
5493 	memcg = kzalloc(struct_size(memcg, nodeinfo, nr_node_ids), GFP_KERNEL);
5494 	if (!memcg)
5495 		return ERR_PTR(error);
5496 
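	/*
	 * Reserve an ID now but keep the slot NULL; the memcg pointer is
	 * only published via idr_replace() in mem_cgroup_css_online(),
	 * once the cgroup is fully initialized.
	 */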
5497 	memcg->id.id = idr_alloc(&mem_cgroup_idr, NULL,
5498 				 1, MEM_CGROUP_ID_MAX + 1, GFP_KERNEL);
5499 	if (memcg->id.id < 0) {
5500 		error = memcg->id.id;
5501 		goto fail;
5502 	}
5503 
5504 	memcg->vmstats = kzalloc(sizeof(struct memcg_vmstats), GFP_KERNEL);
5505 	if (!memcg->vmstats)
5506 		goto fail;
5507 
5508 	memcg->vmstats_percpu = alloc_percpu_gfp(struct memcg_vmstats_percpu,
5509 						 GFP_KERNEL_ACCOUNT);
5510 	if (!memcg->vmstats_percpu)
5511 		goto fail;
5512 
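	/*
	 * Wire up each per-cpu stat batch to its parent's batch on the
	 * same CPU; hierarchical propagation of pending update counts
	 * uses this link.
	 */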
5513 	for_each_possible_cpu(cpu) {
5514 		if (parent)
5515 			pstatc = per_cpu_ptr(parent->vmstats_percpu, cpu);
5516 		statc = per_cpu_ptr(memcg->vmstats_percpu, cpu);
5517 		statc->parent = parent ? pstatc : NULL;
5518 		statc->vmstats = memcg->vmstats;
5519 	}
5520 
5521 	for_each_node(node)
5522 		if (alloc_mem_cgroup_per_node_info(memcg, node))
5523 			goto fail;
5524 
5525 	if (memcg_wb_domain_init(memcg, GFP_KERNEL))
5526 		goto fail;
5527 
5528 	INIT_WORK(&memcg->high_work, high_work_func);
5529 	INIT_LIST_HEAD(&memcg->oom_notify);
5530 	mutex_init(&memcg->thresholds_lock);
5531 	spin_lock_init(&memcg->move_lock);
5532 	vmpressure_init(&memcg->vmpressure);
5533 	INIT_LIST_HEAD(&memcg->event_list);
5534 	spin_lock_init(&memcg->event_list_lock);
5535 	memcg->socket_pressure = jiffies;
5536 #ifdef CONFIG_MEMCG_KMEM
5537 	memcg->kmemcg_id = -1;
5538 	INIT_LIST_HEAD(&memcg->objcg_list);
5539 #endif
5540 #ifdef CONFIG_CGROUP_WRITEBACK
5541 	INIT_LIST_HEAD(&memcg->cgwb_list);
5542 	for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++)
5543 		memcg->cgwb_frn[i].done =
5544 			__WB_COMPLETION_INIT(&memcg_cgwb_frn_waitq);
5545 #endif
5546 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
5547 	spin_lock_init(&memcg->deferred_split_queue.split_queue_lock);
5548 	INIT_LIST_HEAD(&memcg->deferred_split_queue.split_queue);
5549 	memcg->deferred_split_queue.split_queue_len = 0;
5550 #endif
5551 	lru_gen_init_memcg(memcg);
5552 	return memcg;
5553 fail:
5554 	mem_cgroup_id_remove(memcg);
5555 	__mem_cgroup_free(memcg);
5556 	return ERR_PTR(error);
5557 }
5558 
5559 static struct cgroup_subsys_state * __ref
5560 mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
5561 {
5562 	struct mem_cgroup *parent = mem_cgroup_from_css(parent_css);
5563 	struct mem_cgroup *memcg, *old_memcg;
5564 
5565 	old_memcg = set_active_memcg(parent);
5566 	memcg = mem_cgroup_alloc(parent);
5567 	set_active_memcg(old_memcg);
5568 	if (IS_ERR(memcg))
5569 		return ERR_CAST(memcg);
5570 
5571 	page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX);
5572 	WRITE_ONCE(memcg->soft_limit, PAGE_COUNTER_MAX);
5573 #if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
5574 	memcg->zswap_max = PAGE_COUNTER_MAX;
5575 	WRITE_ONCE(memcg->zswap_writeback,
5576 		!parent || READ_ONCE(parent->zswap_writeback));
5577 #endif
5578 	page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX);
5579 	if (parent) {
5580 		WRITE_ONCE(memcg->swappiness, mem_cgroup_swappiness(parent));
5581 		WRITE_ONCE(memcg->oom_kill_disable, READ_ONCE(parent->oom_kill_disable));
5582 
5583 		page_counter_init(&memcg->memory, &parent->memory);
5584 		page_counter_init(&memcg->swap, &parent->swap);
5585 		page_counter_init(&memcg->kmem, &parent->kmem);
5586 		page_counter_init(&memcg->tcpmem, &parent->tcpmem);
5587 	} else {
5588 		init_memcg_events();
5589 		page_counter_init(&memcg->memory, NULL);
5590 		page_counter_init(&memcg->swap, NULL);
5591 		page_counter_init(&memcg->kmem, NULL);
5592 		page_counter_init(&memcg->tcpmem, NULL);
5593 
5594 		root_mem_cgroup = memcg;
5595 		return &memcg->css;
5596 	}
5597 
5598 	if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
5599 		static_branch_inc(&memcg_sockets_enabled_key);
5600 
5601 #if defined(CONFIG_MEMCG_KMEM)
5602 	if (!cgroup_memory_nobpf)
5603 		static_branch_inc(&memcg_bpf_enabled_key);
5604 #endif
5605 
5606 	return &memcg->css;
5607 }
5608 
5609 static int mem_cgroup_css_online(struct cgroup_subsys_state *css)
5610 {
5611 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5612 
5613 	if (memcg_online_kmem(memcg))
5614 		goto remove_id;
5615 
5616 	/*
5617 	 * A memcg must be visible to expand_shrinker_info()
5618 	 * by the time the shrinker maps are allocated, so allocate them
5619 	 * here, where for_each_mem_cgroup() can no longer skip it.
5620 	 */
5621 	if (alloc_shrinker_info(memcg))
5622 		goto offline_kmem;
5623 
5624 	if (unlikely(mem_cgroup_is_root(memcg)))
5625 		queue_delayed_work(system_unbound_wq, &stats_flush_dwork,
5626 				   FLUSH_TIME);
5627 	lru_gen_online_memcg(memcg);
5628 
5629 	/* Online state pins memcg ID, memcg ID pins CSS */
5630 	refcount_set(&memcg->id.ref, 1);
5631 	css_get(css);
5632 
5633 	/*
5634 	 * Ensure mem_cgroup_from_id() works once we're fully online.
5635 	 *
5636 	 * We could do this earlier and require callers to filter with
5637 	 * css_tryget_online(). But right now there are no users that
5638 	 * need earlier access, and the workingset code relies on the
5639 	 * cgroup tree linkage (mem_cgroup_get_nr_swap_pages()). So
5640 	 * publish it here at the end of onlining. This matches the
5641 	 * regular ID destruction during offlining.
5642 	 */
5643 	idr_replace(&mem_cgroup_idr, memcg, memcg->id.id);
5644 
5645 	return 0;
5646 offline_kmem:
5647 	memcg_offline_kmem(memcg);
5648 remove_id:
5649 	mem_cgroup_id_remove(memcg);
5650 	return -ENOMEM;
5651 }
5652 
5653 static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
5654 {
5655 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5656 	struct mem_cgroup_event *event, *tmp;
5657 
5658 	/*
5659 	 * Unregister events and notify userspace.
5660 	 * Notify userspace about cgroup removal only after rmdir of the cgroup
5661 	 * directory to avoid a race between userspace and kernelspace.
5662 	 */
5663 	spin_lock_irq(&memcg->event_list_lock);
5664 	list_for_each_entry_safe(event, tmp, &memcg->event_list, list) {
5665 		list_del_init(&event->list);
5666 		schedule_work(&event->remove);
5667 	}
5668 	spin_unlock_irq(&memcg->event_list_lock);
5669 
5670 	page_counter_set_min(&memcg->memory, 0);
5671 	page_counter_set_low(&memcg->memory, 0);
5672 
5673 	zswap_memcg_offline_cleanup(memcg);
5674 
5675 	memcg_offline_kmem(memcg);
5676 	reparent_shrinker_deferred(memcg);
5677 	wb_memcg_offline(memcg);
5678 	lru_gen_offline_memcg(memcg);
5679 
5680 	drain_all_stock(memcg);
5681 
5682 	mem_cgroup_id_put(memcg);
5683 }
5684 
5685 static void mem_cgroup_css_released(struct cgroup_subsys_state *css)
5686 {
5687 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5688 
5689 	invalidate_reclaim_iterators(memcg);
5690 	lru_gen_release_memcg(memcg);
5691 }
5692 
5693 static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
5694 {
5695 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5696 	int __maybe_unused i;
5697 
5698 #ifdef CONFIG_CGROUP_WRITEBACK
5699 	for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++)
5700 		wb_wait_for_completion(&memcg->cgwb_frn[i].done);
5701 #endif
5702 	if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
5703 		static_branch_dec(&memcg_sockets_enabled_key);
5704 
5705 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_active)
5706 		static_branch_dec(&memcg_sockets_enabled_key);
5707 
5708 #if defined(CONFIG_MEMCG_KMEM)
5709 	if (!cgroup_memory_nobpf)
5710 		static_branch_dec(&memcg_bpf_enabled_key);
5711 #endif
5712 
5713 	vmpressure_cleanup(&memcg->vmpressure);
5714 	cancel_work_sync(&memcg->high_work);
5715 	mem_cgroup_remove_from_trees(memcg);
5716 	free_shrinker_info(memcg);
5717 	mem_cgroup_free(memcg);
5718 }
5719 
5720 /**
5721  * mem_cgroup_css_reset - reset the states of a mem_cgroup
5722  * @css: the target css
5723  *
5724  * Reset the states of the mem_cgroup associated with @css.  This is
5725  * invoked when the userland requests disabling on the default hierarchy
5726  * but the memcg is pinned through dependency.  The memcg should stop
5727  * applying policies and should revert to the vanilla state as it may be
5728  * made visible again.
5729  *
5730  * The current implementation only resets the essential configurations.
5731  * This needs to be expanded to cover all the visible parts.
5732  */
5733 static void mem_cgroup_css_reset(struct cgroup_subsys_state *css)
5734 {
5735 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5736 
5737 	page_counter_set_max(&memcg->memory, PAGE_COUNTER_MAX);
5738 	page_counter_set_max(&memcg->swap, PAGE_COUNTER_MAX);
5739 	page_counter_set_max(&memcg->kmem, PAGE_COUNTER_MAX);
5740 	page_counter_set_max(&memcg->tcpmem, PAGE_COUNTER_MAX);
5741 	page_counter_set_min(&memcg->memory, 0);
5742 	page_counter_set_low(&memcg->memory, 0);
5743 	page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX);
5744 	WRITE_ONCE(memcg->soft_limit, PAGE_COUNTER_MAX);
5745 	page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX);
5746 	memcg_wb_domain_size_changed(memcg);
5747 }
5748 
5749 static void mem_cgroup_css_rstat_flush(struct cgroup_subsys_state *css, int cpu)
5750 {
5751 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5752 	struct mem_cgroup *parent = parent_mem_cgroup(memcg);
5753 	struct memcg_vmstats_percpu *statc;
5754 	long delta, delta_cpu, v;
5755 	int i, nid;
5756 
5757 	statc = per_cpu_ptr(memcg->vmstats_percpu, cpu);
5758 
5759 	for (i = 0; i < MEMCG_NR_STAT; i++) {
5760 		/*
5761 		 * Collect the aggregated propagation counts of groups
5762 		 * below us. We're in a per-cpu loop here and this is
5763 		 * a global counter, so only the first cycle of the loop picks them up.
5764 		 */
5765 		delta = memcg->vmstats->state_pending[i];
5766 		if (delta)
5767 			memcg->vmstats->state_pending[i] = 0;
5768 
5769 		/* Add CPU changes on this level since the last flush */
5770 		delta_cpu = 0;
5771 		v = READ_ONCE(statc->state[i]);
5772 		if (v != statc->state_prev[i]) {
5773 			delta_cpu = v - statc->state_prev[i];
5774 			delta += delta_cpu;
5775 			statc->state_prev[i] = v;
5776 		}
5777 
5778 		/* Aggregate counts on this level and propagate upwards */
5779 		if (delta_cpu)
5780 			memcg->vmstats->state_local[i] += delta_cpu;
5781 
5782 		if (delta) {
5783 			memcg->vmstats->state[i] += delta;
5784 			if (parent)
5785 				parent->vmstats->state_pending[i] += delta;
5786 		}
5787 	}
5788 
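	/* Repeat the same two-step aggregation for the event counters */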
5789 	for (i = 0; i < NR_MEMCG_EVENTS; i++) {
5790 		delta = memcg->vmstats->events_pending[i];
5791 		if (delta)
5792 			memcg->vmstats->events_pending[i] = 0;
5793 
5794 		delta_cpu = 0;
5795 		v = READ_ONCE(statc->events[i]);
5796 		if (v != statc->events_prev[i]) {
5797 			delta_cpu = v - statc->events_prev[i];
5798 			delta += delta_cpu;
5799 			statc->events_prev[i] = v;
5800 		}
5801 
5802 		if (delta_cpu)
5803 			memcg->vmstats->events_local[i] += delta_cpu;
5804 
5805 		if (delta) {
5806 			memcg->vmstats->events[i] += delta;
5807 			if (parent)
5808 				parent->vmstats->events_pending[i] += delta;
5809 		}
5810 	}
5811 
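	/* Finally, aggregate the per-node lruvec statistics the same way */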
5812 	for_each_node_state(nid, N_MEMORY) {
5813 		struct mem_cgroup_per_node *pn = memcg->nodeinfo[nid];
5814 		struct mem_cgroup_per_node *ppn = NULL;
5815 		struct lruvec_stats_percpu *lstatc;
5816 
5817 		if (parent)
5818 			ppn = parent->nodeinfo[nid];
5819 
5820 		lstatc = per_cpu_ptr(pn->lruvec_stats_percpu, cpu);
5821 
5822 		for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
5823 			delta = pn->lruvec_stats.state_pending[i];
5824 			if (delta)
5825 				pn->lruvec_stats.state_pending[i] = 0;
5826 
5827 			delta_cpu = 0;
5828 			v = READ_ONCE(lstatc->state[i]);
5829 			if (v != lstatc->state_prev[i]) {
5830 				delta_cpu = v - lstatc->state_prev[i];
5831 				delta += delta_cpu;
5832 				lstatc->state_prev[i] = v;
5833 			}
5834 
5835 			if (delta_cpu)
5836 				pn->lruvec_stats.state_local[i] += delta_cpu;
5837 
5838 			if (delta) {
5839 				pn->lruvec_stats.state[i] += delta;
5840 				if (ppn)
5841 					ppn->lruvec_stats.state_pending[i] += delta;
5842 			}
5843 		}
5844 	}
5845 	statc->stats_updates = 0;
5846 	/* We are in a per-cpu loop here, only do the atomic write once */
5847 	if (atomic64_read(&memcg->vmstats->stats_updates))
5848 		atomic64_set(&memcg->vmstats->stats_updates, 0);
5849 }
5850 
5851 #ifdef CONFIG_MMU
5852 /* Handlers for move charge at task migration. */
5853 static int mem_cgroup_do_precharge(unsigned long count)
5854 {
5855 	int ret;
5856 
5857 	/* Try a single bulk charge without reclaim first, kswapd may wake */
5858 	ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_DIRECT_RECLAIM, count);
5859 	if (!ret) {
5860 		mc.precharge += count;
5861 		return ret;
5862 	}
5863 
5864 	/* Try charges one by one with reclaim, but do not retry */
5865 	while (count--) {
5866 		ret = try_charge(mc.to, GFP_KERNEL | __GFP_NORETRY, 1);
5867 		if (ret)
5868 			return ret;
5869 		mc.precharge++;
5870 		cond_resched();
5871 	}
5872 	return 0;
5873 }
5874 
5875 union mc_target {
5876 	struct page	*page;
5877 	swp_entry_t	ent;
5878 };
5879 
5880 enum mc_target_type {
5881 	MC_TARGET_NONE = 0,
5882 	MC_TARGET_PAGE,
5883 	MC_TARGET_SWAP,
5884 	MC_TARGET_DEVICE,
5885 };
5886 
5887 static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
5888 						unsigned long addr, pte_t ptent)
5889 {
5890 	struct page *page = vm_normal_page(vma, addr, ptent);
5891 
5892 	if (!page)
5893 		return NULL;
5894 	if (PageAnon(page)) {
5895 		if (!(mc.flags & MOVE_ANON))
5896 			return NULL;
5897 	} else {
5898 		if (!(mc.flags & MOVE_FILE))
5899 			return NULL;
5900 	}
5901 	get_page(page);
5902 
5903 	return page;
5904 }
5905 
5906 #if defined(CONFIG_SWAP) || defined(CONFIG_DEVICE_PRIVATE)
5907 static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
5908 			pte_t ptent, swp_entry_t *entry)
5909 {
5910 	struct page *page = NULL;
5911 	swp_entry_t ent = pte_to_swp_entry(ptent);
5912 
5913 	if (!(mc.flags & MOVE_ANON))
5914 		return NULL;
5915 
5916 	/*
5917 	 * Handle device private pages that are not accessible by the CPU, but
5918 	 * stored as special swap entries in the page table.
5919 	 */
5920 	if (is_device_private_entry(ent)) {
5921 		page = pfn_swap_entry_to_page(ent);
5922 		if (!get_page_unless_zero(page))
5923 			return NULL;
5924 		return page;
5925 	}
5926 
5927 	if (non_swap_entry(ent))
5928 		return NULL;
5929 
5930 	/*
5931 	 * Because swap_cache_get_folio() updates some statistics counters,
5932 	 * we call find_get_page() on the swap address space directly.
5933 	 */
5934 	page = find_get_page(swap_address_space(ent), swp_offset(ent));
5935 	entry->val = ent.val;
5936 
5937 	return page;
5938 }
5939 #else
5940 static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
5941 			pte_t ptent, swp_entry_t *entry)
5942 {
5943 	return NULL;
5944 }
5945 #endif
5946 
5947 static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
5948 			unsigned long addr, pte_t ptent)
5949 {
5950 	unsigned long index;
5951 	struct folio *folio;
5952 
5953 	if (!vma->vm_file) /* anonymous vma */
5954 		return NULL;
5955 	if (!(mc.flags & MOVE_FILE))
5956 		return NULL;
5957 
5958 	/* The folio is moved even if it isn't in this task's RSS (not faulted in). */
5959 	/* shmem/tmpfs may have the folio out on swap: account for that too. */
5960 	index = linear_page_index(vma, addr);
5961 	folio = filemap_get_incore_folio(vma->vm_file->f_mapping, index);
5962 	if (IS_ERR(folio))
5963 		return NULL;
5964 	return folio_file_page(folio, index);
5965 }
5966 
5967 /**
5968  * mem_cgroup_move_account - move account of the page
5969  * @page: the page
5970  * @compound: charge the page as compound or small page
5971  * @from: mem_cgroup which the page is moved from.
5972  * @to:	mem_cgroup which the page is moved to. @from != @to.
5973  *
5974  * The page must be locked and not on the LRU.
5975  *
5976  * This function doesn't do "charge" to new cgroup and doesn't do "uncharge"
5977  * from old cgroup.
5978  */
5979 static int mem_cgroup_move_account(struct page *page,
5980 				   bool compound,
5981 				   struct mem_cgroup *from,
5982 				   struct mem_cgroup *to)
5983 {
5984 	struct folio *folio = page_folio(page);
5985 	struct lruvec *from_vec, *to_vec;
5986 	struct pglist_data *pgdat;
5987 	unsigned int nr_pages = compound ? folio_nr_pages(folio) : 1;
5988 	int nid, ret;
5989 
5990 	VM_BUG_ON(from == to);
5991 	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
5992 	VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
5993 	VM_BUG_ON(compound && !folio_test_large(folio));
5994 
5995 	ret = -EINVAL;
5996 	if (folio_memcg(folio) != from)
5997 		goto out;
5998 
5999 	pgdat = folio_pgdat(folio);
6000 	from_vec = mem_cgroup_lruvec(from, pgdat);
6001 	to_vec = mem_cgroup_lruvec(to, pgdat);
6002 
6003 	folio_memcg_lock(folio);
6004 
6005 	if (folio_test_anon(folio)) {
6006 		if (folio_mapped(folio)) {
6007 			__mod_lruvec_state(from_vec, NR_ANON_MAPPED, -nr_pages);
6008 			__mod_lruvec_state(to_vec, NR_ANON_MAPPED, nr_pages);
6009 			if (folio_test_pmd_mappable(folio)) {
6010 				__mod_lruvec_state(from_vec, NR_ANON_THPS,
6011 						   -nr_pages);
6012 				__mod_lruvec_state(to_vec, NR_ANON_THPS,
6013 						   nr_pages);
6014 			}
6015 		}
6016 	} else {
6017 		__mod_lruvec_state(from_vec, NR_FILE_PAGES, -nr_pages);
6018 		__mod_lruvec_state(to_vec, NR_FILE_PAGES, nr_pages);
6019 
6020 		if (folio_test_swapbacked(folio)) {
6021 			__mod_lruvec_state(from_vec, NR_SHMEM, -nr_pages);
6022 			__mod_lruvec_state(to_vec, NR_SHMEM, nr_pages);
6023 		}
6024 
6025 		if (folio_mapped(folio)) {
6026 			__mod_lruvec_state(from_vec, NR_FILE_MAPPED, -nr_pages);
6027 			__mod_lruvec_state(to_vec, NR_FILE_MAPPED, nr_pages);
6028 		}
6029 
6030 		if (folio_test_dirty(folio)) {
6031 			struct address_space *mapping = folio_mapping(folio);
6032 
6033 			if (mapping_can_writeback(mapping)) {
6034 				__mod_lruvec_state(from_vec, NR_FILE_DIRTY,
6035 						   -nr_pages);
6036 				__mod_lruvec_state(to_vec, NR_FILE_DIRTY,
6037 						   nr_pages);
6038 			}
6039 		}
6040 	}
6041 
6042 #ifdef CONFIG_SWAP
6043 	if (folio_test_swapcache(folio)) {
6044 		__mod_lruvec_state(from_vec, NR_SWAPCACHE, -nr_pages);
6045 		__mod_lruvec_state(to_vec, NR_SWAPCACHE, nr_pages);
6046 	}
6047 #endif
6048 	if (folio_test_writeback(folio)) {
6049 		__mod_lruvec_state(from_vec, NR_WRITEBACK, -nr_pages);
6050 		__mod_lruvec_state(to_vec, NR_WRITEBACK, nr_pages);
6051 	}
6052 
6053 	/*
6054 	 * All state has been migrated, let's switch to the new memcg.
6055 	 *
6056 	 * It is safe to change the page's memcg here because the page
6057 	 * is referenced, charged, isolated, and locked: we can't race
6058 	 * with (un)charging, migration, LRU putback, or anything else
6059 	 * that would rely on a stable page's memory cgroup.
6060 	 *
6061 	 * Note that folio_memcg_lock is a memcg lock, not a page lock,
6062 	 * to save space. As soon as we switch the page's memory cgroup to a
6063 	 * new memcg that isn't locked, the above state can change
6064 	 * concurrently again. Make sure we're truly done with it.
6065 	 */
6066 	smp_mb();
6067 
6068 	css_get(&to->css);
6069 	css_put(&from->css);
6070 
6071 	folio->memcg_data = (unsigned long)to;
6072 
6073 	__folio_memcg_unlock(from);
6074 
6075 	ret = 0;
6076 	nid = folio_nid(folio);
6077 
6078 	local_irq_disable();
6079 	mem_cgroup_charge_statistics(to, nr_pages);
6080 	memcg_check_events(to, nid);
6081 	mem_cgroup_charge_statistics(from, -nr_pages);
6082 	memcg_check_events(from, nid);
6083 	local_irq_enable();
6084 out:
6085 	return ret;
6086 }
6087 
6088 /**
6089  * get_mctgt_type - get target type of moving charge
6090  * @vma: the vma the pte to be checked belongs to
6091  * @addr: the address corresponding to the pte to be checked
6092  * @ptent: the pte to be checked
6093  * @target: the pointer where the target page or swap entry will be stored (can be NULL)
6094  *
6095  * Context: Called with pte lock held.
6096  * Return:
6097  * * MC_TARGET_NONE - If the pte is not a target for move charge.
6098  * * MC_TARGET_PAGE - If the page corresponding to this pte is a target for
6099  *   move charge. If @target is not NULL, the page is stored in target->page
6100  *   with an extra refcount taken (the caller should release it).
6101  * * MC_TARGET_SWAP - If the swap entry corresponding to this pte is a
6102  *   target for charge migration.  If @target is not NULL, the entry is
6103  *   stored in target->ent.
6104  * * MC_TARGET_DEVICE - Like MC_TARGET_PAGE but page is device memory and
6105  *   thus not on the lru.  For now such a page is charged like a regular page
6106  *   would be, as it is just special memory taking the place of a regular page.
6107  *   See Documentation/vm/hmm.txt and include/linux/hmm.h
6108  */
6109 static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma,
6110 		unsigned long addr, pte_t ptent, union mc_target *target)
6111 {
6112 	struct page *page = NULL;
6113 	enum mc_target_type ret = MC_TARGET_NONE;
6114 	swp_entry_t ent = { .val = 0 };
6115 
6116 	if (pte_present(ptent))
6117 		page = mc_handle_present_pte(vma, addr, ptent);
6118 	else if (pte_none_mostly(ptent))
6119 		/*
6120 		 * PTE markers should be treated as a none pte here, separated
6121 		 * from other swap handling below.
6122 		 */
6123 		page = mc_handle_file_pte(vma, addr, ptent);
6124 	else if (is_swap_pte(ptent))
6125 		page = mc_handle_swap_pte(vma, ptent, &ent);
6126 
6127 	if (target && page) {
6128 		if (!trylock_page(page)) {
6129 			put_page(page);
6130 			return ret;
6131 		}
6132 		/*
6133 		 * page_mapped() must be stable during the move. This
6134 		 * pte is locked, so if it's present, the page cannot
6135 		 * become unmapped. If it isn't, we have only partial
6136 		 * control over the mapped state: the page lock will
6137 		 * prevent new faults against pagecache and swapcache,
6138 		 * so an unmapped page cannot become mapped. However,
6139 		 * if the page is already mapped elsewhere, it can
6140 		 * unmap, and there is nothing we can do about it.
6141 		 * Alas, skip moving the page in this case.
6142 		 */
6143 		if (!pte_present(ptent) && page_mapped(page)) {
6144 			unlock_page(page);
6145 			put_page(page);
6146 			return ret;
6147 		}
6148 	}
6149 
6150 	if (!page && !ent.val)
6151 		return ret;
6152 	if (page) {
6153 		/*
6154 		 * Only do a loose check without serialization.
6155 		 * mem_cgroup_move_account() checks whether the page is valid
6156 		 * under LRU exclusion.
6157 		 */
6158 		if (page_memcg(page) == mc.from) {
6159 			ret = MC_TARGET_PAGE;
6160 			if (is_device_private_page(page) ||
6161 			    is_device_coherent_page(page))
6162 				ret = MC_TARGET_DEVICE;
6163 			if (target)
6164 				target->page = page;
6165 		}
6166 		if (!ret || !target) {
6167 			if (target)
6168 				unlock_page(page);
6169 			put_page(page);
6170 		}
6171 	}
6172 	/*
6173 	 * There is a swap entry and the page doesn't exist or isn't charged.
6174 	 * But we cannot move a tail page of a THP.
6175 	 */
6176 	if (ent.val && !ret && (!page || !PageTransCompound(page)) &&
6177 	    mem_cgroup_id(mc.from) == lookup_swap_cgroup_id(ent)) {
6178 		ret = MC_TARGET_SWAP;
6179 		if (target)
6180 			target->ent = ent;
6181 	}
6182 	return ret;
6183 }
6184 
6185 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
6186 /*
6187  * We don't consider PMD-mapped swap entries or file-mapped pages because
6188  * THP does not support them for now.
6189  * Caller should make sure that pmd_trans_huge(pmd) is true.
6190  */
6191 static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
6192 		unsigned long addr, pmd_t pmd, union mc_target *target)
6193 {
6194 	struct page *page = NULL;
6195 	enum mc_target_type ret = MC_TARGET_NONE;
6196 
6197 	if (unlikely(is_swap_pmd(pmd))) {
6198 		VM_BUG_ON(thp_migration_supported() &&
6199 				  !is_pmd_migration_entry(pmd));
6200 		return ret;
6201 	}
6202 	page = pmd_page(pmd);
6203 	VM_BUG_ON_PAGE(!page || !PageHead(page), page);
6204 	if (!(mc.flags & MOVE_ANON))
6205 		return ret;
6206 	if (page_memcg(page) == mc.from) {
6207 		ret = MC_TARGET_PAGE;
6208 		if (target) {
6209 			get_page(page);
6210 			if (!trylock_page(page)) {
6211 				put_page(page);
6212 				return MC_TARGET_NONE;
6213 			}
6214 			target->page = page;
6215 		}
6216 	}
6217 	return ret;
6218 }
6219 #else
6220 static inline enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
6221 		unsigned long addr, pmd_t pmd, union mc_target *target)
6222 {
6223 	return MC_TARGET_NONE;
6224 }
6225 #endif
6226 
6227 static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
6228 					unsigned long addr, unsigned long end,
6229 					struct mm_walk *walk)
6230 {
6231 	struct vm_area_struct *vma = walk->vma;
6232 	pte_t *pte;
6233 	spinlock_t *ptl;
6234 
6235 	ptl = pmd_trans_huge_lock(pmd, vma);
6236 	if (ptl) {
6237 		/*
6238 		 * Note there cannot be MC_TARGET_DEVICE for now as we do not
6239 		 * support transparent huge pages with MEMORY_DEVICE_PRIVATE, but
6240 		 * this might change.
6241 		 */
6242 		if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE)
6243 			mc.precharge += HPAGE_PMD_NR;
6244 		spin_unlock(ptl);
6245 		return 0;
6246 	}
6247 
6248 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
6249 	if (!pte)
6250 		return 0;
6251 	for (; addr != end; pte++, addr += PAGE_SIZE)
6252 		if (get_mctgt_type(vma, addr, ptep_get(pte), NULL))
6253 			mc.precharge++;	/* increment precharge temporarily */
6254 	pte_unmap_unlock(pte - 1, ptl);
6255 	cond_resched();
6256 
6257 	return 0;
6258 }
6259 
6260 static const struct mm_walk_ops precharge_walk_ops = {
6261 	.pmd_entry	= mem_cgroup_count_precharge_pte_range,
6262 	.walk_lock	= PGWALK_RDLOCK,
6263 };
6264 
6265 static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
6266 {
6267 	unsigned long precharge;
6268 
6269 	mmap_read_lock(mm);
6270 	walk_page_range(mm, 0, ULONG_MAX, &precharge_walk_ops, NULL);
6271 	mmap_read_unlock(mm);
6272 
6273 	precharge = mc.precharge;
6274 	mc.precharge = 0;
6275 
6276 	return precharge;
6277 }
6278 
6279 static int mem_cgroup_precharge_mc(struct mm_struct *mm)
6280 {
6281 	unsigned long precharge = mem_cgroup_count_precharge(mm);
6282 
6283 	VM_BUG_ON(mc.moving_task);
6284 	mc.moving_task = current;
6285 	return mem_cgroup_do_precharge(precharge);
6286 }
6287 
6288 /* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */
6289 static void __mem_cgroup_clear_mc(void)
6290 {
6291 	struct mem_cgroup *from = mc.from;
6292 	struct mem_cgroup *to = mc.to;
6293 
6294 	/* we must uncharge all the leftover precharges from mc.to */
6295 	if (mc.precharge) {
6296 		mem_cgroup_cancel_charge(mc.to, mc.precharge);
6297 		mc.precharge = 0;
6298 	}
6299 	/*
6300 	 * we didn't uncharge from mc.from at mem_cgroup_move_account(), so
6301 	 * we must uncharge here.
6302 	 */
6303 	if (mc.moved_charge) {
6304 		mem_cgroup_cancel_charge(mc.from, mc.moved_charge);
6305 		mc.moved_charge = 0;
6306 	}
6307 	/* we must fixup refcnts and charges */
6308 	if (mc.moved_swap) {
6309 		/* uncharge swap account from the old cgroup */
6310 		if (!mem_cgroup_is_root(mc.from))
6311 			page_counter_uncharge(&mc.from->memsw, mc.moved_swap);
6312 
6313 		mem_cgroup_id_put_many(mc.from, mc.moved_swap);
6314 
6315 		/*
6316 		 * we charged both to->memory and to->memsw, so we
6317 		 * should uncharge to->memory.
6318 		 */
6319 		if (!mem_cgroup_is_root(mc.to))
6320 			page_counter_uncharge(&mc.to->memory, mc.moved_swap);
6321 
6322 		mc.moved_swap = 0;
6323 	}
6324 	memcg_oom_recover(from);
6325 	memcg_oom_recover(to);
6326 	wake_up_all(&mc.waitq);
6327 }
6328 
6329 static void mem_cgroup_clear_mc(void)
6330 {
6331 	struct mm_struct *mm = mc.mm;
6332 
6333 	/*
6334 	 * we must clear moving_task before waking up waiters at the end of
6335 	 * task migration.
6336 	 */
6337 	mc.moving_task = NULL;
6338 	__mem_cgroup_clear_mc();
6339 	spin_lock(&mc.lock);
6340 	mc.from = NULL;
6341 	mc.to = NULL;
6342 	mc.mm = NULL;
6343 	spin_unlock(&mc.lock);
6344 
6345 	mmput(mm);
6346 }
6347 
6348 static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
6349 {
6350 	struct cgroup_subsys_state *css;
6351 	struct mem_cgroup *memcg = NULL; /* unneeded init to make gcc happy */
6352 	struct mem_cgroup *from;
6353 	struct task_struct *leader, *p;
6354 	struct mm_struct *mm;
6355 	unsigned long move_flags;
6356 	int ret = 0;
6357 
6358 	/* charge immigration isn't supported on the default hierarchy */
6359 	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
6360 		return 0;
6361 
6362 	/*
6363 	 * Multi-process migrations only happen on the default hierarchy
6364 	 * where charge immigration is not used.  Perform charge
6365 	 * immigration if @tset contains a leader and whine if there are
6366 	 * multiple.
6367 	 */
6368 	p = NULL;
6369 	cgroup_taskset_for_each_leader(leader, css, tset) {
6370 		WARN_ON_ONCE(p);
6371 		p = leader;
6372 		memcg = mem_cgroup_from_css(css);
6373 	}
6374 	if (!p)
6375 		return 0;
6376 
6377 	/*
6378 	 * We are now committed to this value whatever it is. Changes in this
6379 	 * tunable will only affect upcoming migrations, not the current one.
6380 	 * So we need to save it here and stick with it for the whole migration.
6381 	 */
6382 	move_flags = READ_ONCE(memcg->move_charge_at_immigrate);
6383 	if (!move_flags)
6384 		return 0;
6385 
6386 	from = mem_cgroup_from_task(p);
6387 
6388 	VM_BUG_ON(from == memcg);
6389 
6390 	mm = get_task_mm(p);
6391 	if (!mm)
6392 		return 0;
6393 	/* We move charges only when we move an owner of the mm */
6394 	if (mm->owner == p) {
6395 		VM_BUG_ON(mc.from);
6396 		VM_BUG_ON(mc.to);
6397 		VM_BUG_ON(mc.precharge);
6398 		VM_BUG_ON(mc.moved_charge);
6399 		VM_BUG_ON(mc.moved_swap);
6400 
6401 		spin_lock(&mc.lock);
6402 		mc.mm = mm;
6403 		mc.from = from;
6404 		mc.to = memcg;
6405 		mc.flags = move_flags;
6406 		spin_unlock(&mc.lock);
6407 		/* We set mc.moving_task later */
6408 
6409 		ret = mem_cgroup_precharge_mc(mm);
6410 		if (ret)
6411 			mem_cgroup_clear_mc();
6412 	} else {
6413 		mmput(mm);
6414 	}
6415 	return ret;
6416 }
6417 
6418 static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
6419 {
6420 	if (mc.to)
6421 		mem_cgroup_clear_mc();
6422 }
6423 
6424 static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
6425 				unsigned long addr, unsigned long end,
6426 				struct mm_walk *walk)
6427 {
6428 	int ret = 0;
6429 	struct vm_area_struct *vma = walk->vma;
6430 	pte_t *pte;
6431 	spinlock_t *ptl;
6432 	enum mc_target_type target_type;
6433 	union mc_target target;
6434 	struct page *page;
6435 
6436 	ptl = pmd_trans_huge_lock(pmd, vma);
6437 	if (ptl) {
6438 		if (mc.precharge < HPAGE_PMD_NR) {
6439 			spin_unlock(ptl);
6440 			return 0;
6441 		}
6442 		target_type = get_mctgt_type_thp(vma, addr, *pmd, &target);
6443 		if (target_type == MC_TARGET_PAGE) {
6444 			page = target.page;
6445 			if (isolate_lru_page(page)) {
6446 				if (!mem_cgroup_move_account(page, true,
6447 							     mc.from, mc.to)) {
6448 					mc.precharge -= HPAGE_PMD_NR;
6449 					mc.moved_charge += HPAGE_PMD_NR;
6450 				}
6451 				putback_lru_page(page);
6452 			}
6453 			unlock_page(page);
6454 			put_page(page);
6455 		} else if (target_type == MC_TARGET_DEVICE) {
6456 			page = target.page;
6457 			if (!mem_cgroup_move_account(page, true,
6458 						     mc.from, mc.to)) {
6459 				mc.precharge -= HPAGE_PMD_NR;
6460 				mc.moved_charge += HPAGE_PMD_NR;
6461 			}
6462 			unlock_page(page);
6463 			put_page(page);
6464 		}
6465 		spin_unlock(ptl);
6466 		return 0;
6467 	}
6468 
6469 retry:
6470 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
6471 	if (!pte)
6472 		return 0;
6473 	for (; addr != end; addr += PAGE_SIZE) {
6474 		pte_t ptent = ptep_get(pte++);
6475 		bool device = false;
6476 		swp_entry_t ent;
6477 
6478 		if (!mc.precharge)
6479 			break;
6480 
6481 		switch (get_mctgt_type(vma, addr, ptent, &target)) {
6482 		case MC_TARGET_DEVICE:
6483 			device = true;
6484 			fallthrough;
6485 		case MC_TARGET_PAGE:
6486 			page = target.page;
6487 			/*
6488 			 * We can have a part of a split pmd here. Moving it
6489 			 * could be done but would be too convoluted, so simply
6490 			 * ignore such a partial THP and keep it in the original
6491 			 * memcg. There should be somebody mapping the head.
6492 			 */
6493 			if (PageTransCompound(page))
6494 				goto put;
6495 			if (!device && !isolate_lru_page(page))
6496 				goto put;
6497 			if (!mem_cgroup_move_account(page, false,
6498 						mc.from, mc.to)) {
6499 				mc.precharge--;
6500 				/* we uncharge from mc.from later. */
6501 				mc.moved_charge++;
6502 			}
6503 			if (!device)
6504 				putback_lru_page(page);
6505 put:			/* get_mctgt_type() gets & locks the page */
6506 			unlock_page(page);
6507 			put_page(page);
6508 			break;
6509 		case MC_TARGET_SWAP:
6510 			ent = target.ent;
6511 			if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) {
6512 				mc.precharge--;
6513 				mem_cgroup_id_get_many(mc.to, 1);
6514 				/* we fixup other refcnts and charges later. */
6515 				mc.moved_swap++;
6516 			}
6517 			break;
6518 		default:
6519 			break;
6520 		}
6521 	}
6522 	pte_unmap_unlock(pte - 1, ptl);
6523 	cond_resched();
6524 
6525 	if (addr != end) {
6526 		/*
6527 		 * We have consumed all precharges we got in can_attach().
6528 		 * We try charging one by one, but don't do any additional
6529 		 * charges to mc.to if we have already failed to charge once in
6530 		 * the attach() phase.
6531 		 */
6532 		ret = mem_cgroup_do_precharge(1);
6533 		if (!ret)
6534 			goto retry;
6535 	}
6536 
6537 	return ret;
6538 }
6539 
6540 static const struct mm_walk_ops charge_walk_ops = {
6541 	.pmd_entry	= mem_cgroup_move_charge_pte_range,
6542 	.walk_lock	= PGWALK_RDLOCK,
6543 };
6544 
6545 static void mem_cgroup_move_charge(void)
6546 {
6547 	lru_add_drain_all();
6548 	/*
6549 	 * Signal folio_memcg_lock() to take the memcg's move_lock
6550 	 * while we're moving its pages to another memcg. Then wait
6551 	 * for already started RCU-only updates to finish.
6552 	 */
6553 	atomic_inc(&mc.from->moving_account);
6554 	synchronize_rcu();
6555 retry:
6556 	if (unlikely(!mmap_read_trylock(mc.mm))) {
6557 		/*
6558 		 * Someone who is holding the mmap_lock might be waiting on
6559 		 * the waitq. So we cancel all extra charges, wake up all waiters,
6560 		 * and retry. Because we cancel precharges, we might not be able
6561 		 * to move enough charges, but moving charge is a best-effort
6562 		 * feature anyway, so it wouldn't be a big problem.
6563 		 */
6564 		__mem_cgroup_clear_mc();
6565 		cond_resched();
6566 		goto retry;
6567 	}
6568 	/*
6569 	 * When we have consumed all precharges and failed in doing
6570 	 * additional charge, the page walk just aborts.
6571 	 */
6572 	walk_page_range(mc.mm, 0, ULONG_MAX, &charge_walk_ops, NULL);
6573 	mmap_read_unlock(mc.mm);
6574 	atomic_dec(&mc.from->moving_account);
6575 }
6576 
6577 static void mem_cgroup_move_task(void)
6578 {
6579 	if (mc.to) {
6580 		mem_cgroup_move_charge();
6581 		mem_cgroup_clear_mc();
6582 	}
6583 }
6584 
6585 #else	/* !CONFIG_MMU */
6586 static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
6587 {
6588 	return 0;
6589 }
6590 static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
6591 {
6592 }
6593 static void mem_cgroup_move_task(void)
6594 {
6595 }
6596 #endif
6597 
6598 #ifdef CONFIG_MEMCG_KMEM
6599 static void mem_cgroup_fork(struct task_struct *task)
6600 {
6601 	/*
6602 	 * Set the update flag to cause task->objcg to be initialized lazily
6603 	 * on the first allocation. It can be done without any synchronization
6604 	 * because it's always performed on the current task, as is
6605 	 * current_objcg_update().
6606 	 */
6607 	task->objcg = (struct obj_cgroup *)CURRENT_OBJCG_UPDATE_FLAG;
6608 }
6609 
6610 static void mem_cgroup_exit(struct task_struct *task)
6611 {
6612 	struct obj_cgroup *objcg = task->objcg;
6613 
6614 	objcg = (struct obj_cgroup *)
6615 		((unsigned long)objcg & ~CURRENT_OBJCG_UPDATE_FLAG);
6616 	if (objcg)
6617 		obj_cgroup_put(objcg);
6618 
6619 	/*
6620 	 * Some kernel allocations can happen after this point,
6621 	 * but let's ignore them. It can be done without any synchronization
6622 	 * because it's always performed on the current task, as is
6623 	 * current_objcg_update().
6624 	 */
6625 	task->objcg = NULL;
6626 }
6627 #endif
6628 
6629 #ifdef CONFIG_LRU_GEN
6630 static void mem_cgroup_lru_gen_attach(struct cgroup_taskset *tset)
6631 {
6632 	struct task_struct *task;
6633 	struct cgroup_subsys_state *css;
6634 
6635 	/* find the first leader if there is any */
6636 	cgroup_taskset_for_each_leader(task, css, tset)
6637 		break;
6638 
6639 	if (!task)
6640 		return;
6641 
6642 	task_lock(task);
6643 	if (task->mm && READ_ONCE(task->mm->owner) == task)
6644 		lru_gen_migrate_mm(task->mm);
6645 	task_unlock(task);
6646 }
6647 #else
6648 static void mem_cgroup_lru_gen_attach(struct cgroup_taskset *tset) {}
6649 #endif /* CONFIG_LRU_GEN */
6650 
6651 #ifdef CONFIG_MEMCG_KMEM
6652 static void mem_cgroup_kmem_attach(struct cgroup_taskset *tset)
6653 {
6654 	struct task_struct *task;
6655 	struct cgroup_subsys_state *css;
6656 
6657 	cgroup_taskset_for_each(task, css, tset) {
6658 		/* atomically set the update bit */
6659 		set_bit(CURRENT_OBJCG_UPDATE_BIT, (unsigned long *)&task->objcg);
6660 	}
6661 }
6662 #else
6663 static void mem_cgroup_kmem_attach(struct cgroup_taskset *tset) {}
6664 #endif /* CONFIG_MEMCG_KMEM */
6665 
6666 #if defined(CONFIG_LRU_GEN) || defined(CONFIG_MEMCG_KMEM)
6667 static void mem_cgroup_attach(struct cgroup_taskset *tset)
6668 {
6669 	mem_cgroup_lru_gen_attach(tset);
6670 	mem_cgroup_kmem_attach(tset);
6671 }
6672 #endif
6673 
6674 static int seq_puts_memcg_tunable(struct seq_file *m, unsigned long value)
6675 {
6676 	if (value == PAGE_COUNTER_MAX)
6677 		seq_puts(m, "max\n");
6678 	else
6679 		seq_printf(m, "%llu\n", (u64)value * PAGE_SIZE);
6680 
6681 	return 0;
6682 }
6683 
6684 static u64 memory_current_read(struct cgroup_subsys_state *css,
6685 			       struct cftype *cft)
6686 {
6687 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
6688 
6689 	return (u64)page_counter_read(&memcg->memory) * PAGE_SIZE;
6690 }
6691 
6692 static u64 memory_peak_read(struct cgroup_subsys_state *css,
6693 			    struct cftype *cft)
6694 {
6695 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
6696 
6697 	return (u64)memcg->memory.watermark * PAGE_SIZE;
6698 }
6699 
6700 static int memory_min_show(struct seq_file *m, void *v)
6701 {
6702 	return seq_puts_memcg_tunable(m,
6703 		READ_ONCE(mem_cgroup_from_seq(m)->memory.min));
6704 }
6705 
6706 static ssize_t memory_min_write(struct kernfs_open_file *of,
6707 				char *buf, size_t nbytes, loff_t off)
6708 {
6709 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6710 	unsigned long min;
6711 	int err;
6712 
6713 	buf = strstrip(buf);
6714 	err = page_counter_memparse(buf, "max", &min);
6715 	if (err)
6716 		return err;
6717 
6718 	page_counter_set_min(&memcg->memory, min);
6719 
6720 	return nbytes;
6721 }
6722 
6723 static int memory_low_show(struct seq_file *m, void *v)
6724 {
6725 	return seq_puts_memcg_tunable(m,
6726 		READ_ONCE(mem_cgroup_from_seq(m)->memory.low));
6727 }
6728 
6729 static ssize_t memory_low_write(struct kernfs_open_file *of,
6730 				char *buf, size_t nbytes, loff_t off)
6731 {
6732 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6733 	unsigned long low;
6734 	int err;
6735 
6736 	buf = strstrip(buf);
6737 	err = page_counter_memparse(buf, "max", &low);
6738 	if (err)
6739 		return err;
6740 
6741 	page_counter_set_low(&memcg->memory, low);
6742 
6743 	return nbytes;
6744 }
6745 
6746 static int memory_high_show(struct seq_file *m, void *v)
6747 {
6748 	return seq_puts_memcg_tunable(m,
6749 		READ_ONCE(mem_cgroup_from_seq(m)->memory.high));
6750 }
6751 
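/*
 * Writes to memory.high set the throttle limit and then try to reclaim
 * the overage directly, e.g. (illustrative):
 *
 *	echo "1G" > memory.high
 *
 * "max" can be written to remove the limit again.
 */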
6752 static ssize_t memory_high_write(struct kernfs_open_file *of,
6753 				 char *buf, size_t nbytes, loff_t off)
6754 {
6755 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6756 	unsigned int nr_retries = MAX_RECLAIM_RETRIES;
6757 	bool drained = false;
6758 	unsigned long high;
6759 	int err;
6760 
6761 	buf = strstrip(buf);
6762 	err = page_counter_memparse(buf, "max", &high);
6763 	if (err)
6764 		return err;
6765 
6766 	page_counter_set_high(&memcg->memory, high);
6767 
6768 	for (;;) {
6769 		unsigned long nr_pages = page_counter_read(&memcg->memory);
6770 		unsigned long reclaimed;
6771 
6772 		if (nr_pages <= high)
6773 			break;
6774 
6775 		if (signal_pending(current))
6776 			break;
6777 
6778 		if (!drained) {
6779 			drain_all_stock(memcg);
6780 			drained = true;
6781 			continue;
6782 		}
6783 
6784 		reclaimed = try_to_free_mem_cgroup_pages(memcg, nr_pages - high,
6785 					GFP_KERNEL, MEMCG_RECLAIM_MAY_SWAP);
6786 
6787 		if (!reclaimed && !nr_retries--)
6788 			break;
6789 	}
6790 
6791 	memcg_wb_domain_size_changed(memcg);
6792 	return nbytes;
6793 }
6794 
6795 static int memory_max_show(struct seq_file *m, void *v)
6796 {
6797 	return seq_puts_memcg_tunable(m,
6798 		READ_ONCE(mem_cgroup_from_seq(m)->memory.max));
6799 }
6800 
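/*
 * Writes to memory.max set the hard limit and enforce it immediately:
 * first through reclaim and, if usage still exceeds the new limit, by
 * invoking the OOM killer on the group. Illustrative usage:
 *
 *	echo "512M" > memory.max
 */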
6801 static ssize_t memory_max_write(struct kernfs_open_file *of,
6802 				char *buf, size_t nbytes, loff_t off)
6803 {
6804 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6805 	unsigned int nr_reclaims = MAX_RECLAIM_RETRIES;
6806 	bool drained = false;
6807 	unsigned long max;
6808 	int err;
6809 
6810 	buf = strstrip(buf);
6811 	err = page_counter_memparse(buf, "max", &max);
6812 	if (err)
6813 		return err;
6814 
6815 	xchg(&memcg->memory.max, max);
6816 
6817 	for (;;) {
6818 		unsigned long nr_pages = page_counter_read(&memcg->memory);
6819 
6820 		if (nr_pages <= max)
6821 			break;
6822 
6823 		if (signal_pending(current))
6824 			break;
6825 
6826 		if (!drained) {
6827 			drain_all_stock(memcg);
6828 			drained = true;
6829 			continue;
6830 		}
6831 
6832 		if (nr_reclaims) {
6833 			if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max,
6834 					GFP_KERNEL, MEMCG_RECLAIM_MAY_SWAP))
6835 				nr_reclaims--;
6836 			continue;
6837 		}
6838 
6839 		memcg_memory_event(memcg, MEMCG_OOM);
6840 		if (!mem_cgroup_out_of_memory(memcg, GFP_KERNEL, 0))
6841 			break;
6842 	}
6843 
6844 	memcg_wb_domain_size_changed(memcg);
6845 	return nbytes;
6846 }
6847 
6848 /*
6849  * Note: don't forget to update the 'samples/cgroup/memcg_event_listener'
6850  * if any new events become available.
6851  */
6852 static void __memory_events_show(struct seq_file *m, atomic_long_t *events)
6853 {
6854 	seq_printf(m, "low %lu\n", atomic_long_read(&events[MEMCG_LOW]));
6855 	seq_printf(m, "high %lu\n", atomic_long_read(&events[MEMCG_HIGH]));
6856 	seq_printf(m, "max %lu\n", atomic_long_read(&events[MEMCG_MAX]));
6857 	seq_printf(m, "oom %lu\n", atomic_long_read(&events[MEMCG_OOM]));
6858 	seq_printf(m, "oom_kill %lu\n",
6859 		   atomic_long_read(&events[MEMCG_OOM_KILL]));
6860 	seq_printf(m, "oom_group_kill %lu\n",
6861 		   atomic_long_read(&events[MEMCG_OOM_GROUP_KILL]));
6862 }
6863 
6864 static int memory_events_show(struct seq_file *m, void *v)
6865 {
6866 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6867 
6868 	__memory_events_show(m, memcg->memory_events);
6869 	return 0;
6870 }
6871 
6872 static int memory_events_local_show(struct seq_file *m, void *v)
6873 {
6874 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6875 
6876 	__memory_events_show(m, memcg->memory_events_local);
6877 	return 0;
6878 }
6879 
6880 static int memory_stat_show(struct seq_file *m, void *v)
6881 {
6882 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6883 	char *buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
6884 	struct seq_buf s;
6885 
6886 	if (!buf)
6887 		return -ENOMEM;
6888 	seq_buf_init(&s, buf, PAGE_SIZE);
6889 	memory_stat_format(memcg, &s);
6890 	seq_puts(m, buf);
6891 	kfree(buf);
6892 	return 0;
6893 }
6894 
6895 #ifdef CONFIG_NUMA
6896 static inline unsigned long lruvec_page_state_output(struct lruvec *lruvec,
6897 						     int item)
6898 {
6899 	return lruvec_page_state(lruvec, item) *
6900 		memcg_page_state_output_unit(item);
6901 }
6902 
6903 static int memory_numa_stat_show(struct seq_file *m, void *v)
6904 {
6905 	int i;
6906 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6907 
6908 	mem_cgroup_flush_stats(memcg);
6909 
6910 	for (i = 0; i < ARRAY_SIZE(memory_stats); i++) {
6911 		int nid;
6912 
6913 		if (memory_stats[i].idx >= NR_VM_NODE_STAT_ITEMS)
6914 			continue;
6915 
6916 		seq_printf(m, "%s", memory_stats[i].name);
6917 		for_each_node_state(nid, N_MEMORY) {
6918 			u64 size;
6919 			struct lruvec *lruvec;
6920 
6921 			lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
6922 			size = lruvec_page_state_output(lruvec,
6923 							memory_stats[i].idx);
6924 			seq_printf(m, " N%d=%llu", nid, size);
6925 		}
6926 		seq_putc(m, '\n');
6927 	}
6928 
6929 	return 0;
6930 }
6931 #endif
6932 
6933 static int memory_oom_group_show(struct seq_file *m, void *v)
6934 {
6935 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6936 
6937 	seq_printf(m, "%d\n", READ_ONCE(memcg->oom_group));
6938 
6939 	return 0;
6940 }
6941 
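/*
 * memory.oom.group accepts only 0 or 1. Setting it to 1 (e.g. with
 * "echo 1 > memory.oom.group") asks the OOM killer to treat the cgroup
 * as a single indivisible workload and kill it as a whole.
 */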
6942 static ssize_t memory_oom_group_write(struct kernfs_open_file *of,
6943 				      char *buf, size_t nbytes, loff_t off)
6944 {
6945 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6946 	int ret, oom_group;
6947 
6948 	buf = strstrip(buf);
6949 	if (!buf)
6950 		return -EINVAL;
6951 
6952 	ret = kstrtoint(buf, 0, &oom_group);
6953 	if (ret)
6954 		return ret;
6955 
6956 	if (oom_group != 0 && oom_group != 1)
6957 		return -EINVAL;
6958 
6959 	WRITE_ONCE(memcg->oom_group, oom_group);
6960 
6961 	return nbytes;
6962 }
6963 
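/*
 * Handler for writes to memory.reclaim. Illustrative usage from userspace:
 *
 *	echo "256M" > memory.reclaim
 *
 * requests proactive reclaim of 256M from the cgroup; -EAGAIN is returned
 * if the requested amount cannot be reclaimed within the retry budget.
 */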
6964 static ssize_t memory_reclaim(struct kernfs_open_file *of, char *buf,
6965 			      size_t nbytes, loff_t off)
6966 {
6967 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6968 	unsigned int nr_retries = MAX_RECLAIM_RETRIES;
6969 	unsigned long nr_to_reclaim, nr_reclaimed = 0;
6970 	unsigned int reclaim_options;
6971 	int err;
6972 
6973 	buf = strstrip(buf);
6974 	err = page_counter_memparse(buf, "", &nr_to_reclaim);
6975 	if (err)
6976 		return err;
6977 
6978 	reclaim_options	= MEMCG_RECLAIM_MAY_SWAP | MEMCG_RECLAIM_PROACTIVE;
6979 	while (nr_reclaimed < nr_to_reclaim) {
6980 		unsigned long reclaimed;
6981 
6982 		if (signal_pending(current))
6983 			return -EINTR;
6984 
6985 		/*
6986 		 * This is the final attempt: drain the percpu lru caches in the
6987 		 * hope of exposing more evictable pages to
6988 		 * try_to_free_mem_cgroup_pages().
6989 		 */
6990 		if (!nr_retries)
6991 			lru_add_drain_all();
6992 
6993 		reclaimed = try_to_free_mem_cgroup_pages(memcg,
6994 					min(nr_to_reclaim - nr_reclaimed, SWAP_CLUSTER_MAX),
6995 					GFP_KERNEL, reclaim_options);
6996 
6997 		if (!reclaimed && !nr_retries--)
6998 			return -EAGAIN;
6999 
7000 		nr_reclaimed += reclaimed;
7001 	}
7002 
7003 	return nbytes;
7004 }
7005 
7006 static struct cftype memory_files[] = {
7007 	{
7008 		.name = "current",
7009 		.flags = CFTYPE_NOT_ON_ROOT,
7010 		.read_u64 = memory_current_read,
7011 	},
7012 	{
7013 		.name = "peak",
7014 		.flags = CFTYPE_NOT_ON_ROOT,
7015 		.read_u64 = memory_peak_read,
7016 	},
7017 	{
7018 		.name = "min",
7019 		.flags = CFTYPE_NOT_ON_ROOT,
7020 		.seq_show = memory_min_show,
7021 		.write = memory_min_write,
7022 	},
7023 	{
7024 		.name = "low",
7025 		.flags = CFTYPE_NOT_ON_ROOT,
7026 		.seq_show = memory_low_show,
7027 		.write = memory_low_write,
7028 	},
7029 	{
7030 		.name = "high",
7031 		.flags = CFTYPE_NOT_ON_ROOT,
7032 		.seq_show = memory_high_show,
7033 		.write = memory_high_write,
7034 	},
7035 	{
7036 		.name = "max",
7037 		.flags = CFTYPE_NOT_ON_ROOT,
7038 		.seq_show = memory_max_show,
7039 		.write = memory_max_write,
7040 	},
7041 	{
7042 		.name = "events",
7043 		.flags = CFTYPE_NOT_ON_ROOT,
7044 		.file_offset = offsetof(struct mem_cgroup, events_file),
7045 		.seq_show = memory_events_show,
7046 	},
7047 	{
7048 		.name = "events.local",
7049 		.flags = CFTYPE_NOT_ON_ROOT,
7050 		.file_offset = offsetof(struct mem_cgroup, events_local_file),
7051 		.seq_show = memory_events_local_show,
7052 	},
7053 	{
7054 		.name = "stat",
7055 		.seq_show = memory_stat_show,
7056 	},
7057 #ifdef CONFIG_NUMA
7058 	{
7059 		.name = "numa_stat",
7060 		.seq_show = memory_numa_stat_show,
7061 	},
7062 #endif
7063 	{
7064 		.name = "oom.group",
7065 		.flags = CFTYPE_NOT_ON_ROOT | CFTYPE_NS_DELEGATABLE,
7066 		.seq_show = memory_oom_group_show,
7067 		.write = memory_oom_group_write,
7068 	},
7069 	{
7070 		.name = "reclaim",
7071 		.flags = CFTYPE_NS_DELEGATABLE,
7072 		.write = memory_reclaim,
7073 	},
7074 	{ }	/* terminate */
7075 };
7076 
7077 struct cgroup_subsys memory_cgrp_subsys = {
7078 	.css_alloc = mem_cgroup_css_alloc,
7079 	.css_online = mem_cgroup_css_online,
7080 	.css_offline = mem_cgroup_css_offline,
7081 	.css_released = mem_cgroup_css_released,
7082 	.css_free = mem_cgroup_css_free,
7083 	.css_reset = mem_cgroup_css_reset,
7084 	.css_rstat_flush = mem_cgroup_css_rstat_flush,
7085 	.can_attach = mem_cgroup_can_attach,
7086 #if defined(CONFIG_LRU_GEN) || defined(CONFIG_MEMCG_KMEM)
7087 	.attach = mem_cgroup_attach,
7088 #endif
7089 	.cancel_attach = mem_cgroup_cancel_attach,
7090 	.post_attach = mem_cgroup_move_task,
7091 #ifdef CONFIG_MEMCG_KMEM
7092 	.fork = mem_cgroup_fork,
7093 	.exit = mem_cgroup_exit,
7094 #endif
7095 	.dfl_cftypes = memory_files,
7096 	.legacy_cftypes = mem_cgroup_legacy_files,
7097 	.early_init = 0,
7098 };
7099 
7100 /*
7101  * This function calculates an individual cgroup's effective
7102  * protection which is derived from its own memory.min/low, its
7103  * parent's and siblings' settings, as well as the actual memory
7104  * distribution in the tree.
7105  *
7106  * The following rules apply to the effective protection values:
7107  *
7108  * 1. At the first level of reclaim, effective protection is equal to
7109  *    the declared protection in memory.min and memory.low.
7110  *
7111  * 2. To enable safe delegation of the protection configuration, at
7112  *    subsequent levels the effective protection is capped to the
7113  *    parent's effective protection.
7114  *
7115  * 3. To make complex and dynamic subtrees easier to configure, the
7116  *    user is allowed to overcommit the declared protection at a given
7117  *    level. If that is the case, the parent's effective protection is
7118  *    distributed to the children in proportion to how much protection
7119  *    they have declared and how much of it they are utilizing.
7120  *
7121  *    This makes distribution proportional, but also work-conserving:
7122  *    if one cgroup claims far more protection than the memory it uses,
7123  *    the unused remainder is available to its siblings.
7124  *
7125  * 4. Conversely, when the declared protection is undercommitted at a
7126  *    given level, the distribution of the larger parental protection
7127  *    budget is NOT proportional. A cgroup's protection from a sibling
7128  *    is capped to its own memory.min/low setting.
7129  *
7130  * 5. However, to allow protecting recursive subtrees from each other
7131  *    without having to declare each individual cgroup's fixed share
7132  *    of the ancestor's claim to protection, any unutilized -
7133  *    "floating" - protection from up the tree is distributed in
7134  *    proportion to each cgroup's *usage*. This makes the protection
7135  *    neutral wrt sibling cgroups and lets them compete freely over
7136  *    the shared parental protection budget, but it protects the
7137  *    subtree as a whole from neighboring subtrees.
7138  *
7139  * Note that 4. and 5. are not in conflict: 4. is about protecting
7140  * against immediate siblings whereas 5. is about protecting against
7141  * neighboring subtrees.
7142  */
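/*
 * Worked example with illustrative numbers: a parent with an effective
 * low protection of 100M has two children that each declare and fully
 * use 80M of protection. The combined claim (160M) overcommits the
 * parent, so by rule 3 each child's effective protection is scaled to
 * 80M * 100M / 160M = 50M.
 */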
7143 static unsigned long effective_protection(unsigned long usage,
7144 					  unsigned long parent_usage,
7145 					  unsigned long setting,
7146 					  unsigned long parent_effective,
7147 					  unsigned long siblings_protected)
7148 {
7149 	unsigned long protected;
7150 	unsigned long ep;
7151 
7152 	protected = min(usage, setting);
7153 	/*
7154 	 * If all cgroups at this level combined claim and use more
7155 	 * protection than what the parent affords them, distribute
7156 	 * shares in proportion to utilization.
7157 	 *
7158 	 * We are using actual utilization rather than the statically
7159 	 * claimed protection in order to be work-conserving: claimed
7160 	 * but unused protection is available to siblings that would
7161 	 * otherwise get a smaller chunk than what they claimed.
7162 	 */
7163 	if (siblings_protected > parent_effective)
7164 		return protected * parent_effective / siblings_protected;
7165 
7166 	/*
7167 	 * Ok, utilized protection of all children is within what the
7168 	 * parent affords them, so we know whatever this child claims
7169 	 * and utilizes is effectively protected.
7170 	 *
7171 	 * If there is unprotected usage beyond this value, reclaim
7172 	 * will apply pressure in proportion to that amount.
7173 	 *
7174 	 * If there is unutilized protection, the cgroup will be fully
7175 	 * shielded from reclaim, but we do return a smaller value for
7176 	 * protection than what the group could enjoy in theory. This
7177 	 * is okay. With the overcommit distribution above, effective
7178 	 * protection is always dependent on how memory is actually
7179 	 * consumed among the siblings anyway.
7180 	 */
7181 	ep = protected;
7182 
7183 	/*
7184 	 * If the children aren't claiming (all of) the protection
7185 	 * afforded to them by the parent, distribute the remainder in
7186 	 * proportion to the (unprotected) memory of each cgroup. That
7187 	 * way, cgroups that aren't explicitly prioritized wrt each
7188 	 * other compete freely over the allowance, but they are
7189 	 * collectively protected from neighboring trees.
7190 	 *
7191 	 * We're using unprotected memory for the weight so that if
7192 	 * some cgroups DO claim explicit protection, we don't protect
7193 	 * the same bytes twice.
7194 	 *
7195 	 * Check both usage and parent_usage against the respective
7196 	 * protected values. One should imply the other, but they
7197 	 * aren't read atomically - make sure the division is sane.
7198 	 */
7199 	if (!(cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_RECURSIVE_PROT))
7200 		return ep;
7201 	if (parent_effective > siblings_protected &&
7202 	    parent_usage > siblings_protected &&
7203 	    usage > protected) {
7204 		unsigned long unclaimed;
7205 
7206 		unclaimed = parent_effective - siblings_protected;
7207 		unclaimed *= usage - protected;
7208 		unclaimed /= parent_usage - siblings_protected;
7209 
7210 		ep += unclaimed;
7211 	}
7212 
7213 	return ep;
7214 }
7215 
7216 /**
7217  * mem_cgroup_calculate_protection - check if memory consumption is in the normal range
7218  * @root: the top ancestor of the sub-tree being checked
7219  * @memcg: the memory cgroup to check
7220  *
7221  * WARNING: This function is not stateless! It can only be used as part
7222  *          of a top-down tree iteration, not for isolated queries.
7223  */
7224 void mem_cgroup_calculate_protection(struct mem_cgroup *root,
7225 				     struct mem_cgroup *memcg)
7226 {
7227 	unsigned long usage, parent_usage;
7228 	struct mem_cgroup *parent;
7229 
7230 	if (mem_cgroup_disabled())
7231 		return;
7232 
7233 	if (!root)
7234 		root = root_mem_cgroup;
7235 
7236 	/*
7237 	 * Effective values of the reclaim targets are ignored so they
7238 	 * can be stale. See mem_cgroup_protection() for more
7239 	 * details.
7240 	 * TODO: calculation should be more robust so that we do not need
7241 	 * that special casing.
7242 	 */
7243 	if (memcg == root)
7244 		return;
7245 
7246 	usage = page_counter_read(&memcg->memory);
7247 	if (!usage)
7248 		return;
7249 
7250 	parent = parent_mem_cgroup(memcg);
7251 
7252 	if (parent == root) {
7253 		memcg->memory.emin = READ_ONCE(memcg->memory.min);
7254 		memcg->memory.elow = READ_ONCE(memcg->memory.low);
7255 		return;
7256 	}
7257 
7258 	parent_usage = page_counter_read(&parent->memory);
7259 
7260 	WRITE_ONCE(memcg->memory.emin, effective_protection(usage, parent_usage,
7261 			READ_ONCE(memcg->memory.min),
7262 			READ_ONCE(parent->memory.emin),
7263 			atomic_long_read(&parent->memory.children_min_usage)));
7264 
7265 	WRITE_ONCE(memcg->memory.elow, effective_protection(usage, parent_usage,
7266 			READ_ONCE(memcg->memory.low),
7267 			READ_ONCE(parent->memory.elow),
7268 			atomic_long_read(&parent->memory.children_low_usage)));
7269 }
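
/*
 * Usage sketch (illustrative, not copied from any caller): effective
 * protection is derived top-down from the parent's emin/elow, so reclaim
 * is expected to call this from within a hierarchy walk, roughly:
 *
 *	memcg = mem_cgroup_iter(root, NULL, NULL);
 *	do {
 *		mem_cgroup_calculate_protection(root, memcg);
 *		... consult mem_cgroup_protection() and reclaim ...
 *	} while ((memcg = mem_cgroup_iter(root, memcg, NULL)));
 */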
7270 
7271 static int charge_memcg(struct folio *folio, struct mem_cgroup *memcg,
7272 			gfp_t gfp)
7273 {
7274 	int ret;
7275 
7276 	ret = try_charge(memcg, gfp, folio_nr_pages(folio));
7277 	if (ret)
7278 		goto out;
7279 
7280 	mem_cgroup_commit_charge(folio, memcg);
7281 out:
7282 	return ret;
7283 }
7284 
7285 int __mem_cgroup_charge(struct folio *folio, struct mm_struct *mm, gfp_t gfp)
7286 {
7287 	struct mem_cgroup *memcg;
7288 	int ret;
7289 
7290 	memcg = get_mem_cgroup_from_mm(mm);
7291 	ret = charge_memcg(folio, memcg, gfp);
7292 	css_put(&memcg->css);
7293 
7294 	return ret;
7295 }
7296 
7297 /**
7298  * mem_cgroup_hugetlb_try_charge - try to charge the memcg for a hugetlb folio
7299  * @memcg: memcg to charge.
7300  * @gfp: reclaim mode.
7301  * @nr_pages: number of pages to charge.
7302  *
7303  * This function is called when allocating a huge page folio to determine if
7304  * the memcg has the capacity for it. It does not commit the charge yet,
7305  * as the hugetlb folio itself has not been obtained from the hugetlb pool.
7306  *
7307  * Once we have obtained the hugetlb folio, we can call
7308  * mem_cgroup_commit_charge() to commit the charge. If we fail to obtain the
7309  * folio, we should instead call mem_cgroup_cancel_charge() to undo the effect
7310  * of try_charge().
7311  *
7312  * Returns 0 on success. Otherwise, an error code is returned.
7313  */
7314 int mem_cgroup_hugetlb_try_charge(struct mem_cgroup *memcg, gfp_t gfp,
7315 			long nr_pages)
7316 {
7317 	/*
7318 	 * If hugetlb memcg charging is not enabled, do not fail hugetlb allocation,
7319 	 * but do not attempt to commit charge later (or cancel on error) either.
7320 	 */
7321 	if (mem_cgroup_disabled() || !memcg ||
7322 		!cgroup_subsys_on_dfl(memory_cgrp_subsys) ||
7323 		!(cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_HUGETLB_ACCOUNTING))
7324 		return -EOPNOTSUPP;
7325 
7326 	if (try_charge(memcg, gfp, nr_pages))
7327 		return -ENOMEM;
7328 
7329 	return 0;
7330 }
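
/*
 * Illustrative pairing of the calls documented above (a sketch, not a
 * verbatim caller; dequeue_from_hugetlb_pool() is a hypothetical helper):
 *
 *	ret = mem_cgroup_hugetlb_try_charge(memcg, gfp, nr_pages);
 *	if (ret == -ENOMEM)
 *		goto fail;
 *	folio = dequeue_from_hugetlb_pool(...);
 *	if (folio) {
 *		if (!ret)
 *			mem_cgroup_commit_charge(folio, memcg);
 *	} else if (!ret) {
 *		mem_cgroup_cancel_charge(memcg, nr_pages);
 *	}
 */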
7331 
7332 /**
7333  * mem_cgroup_swapin_charge_folio - Charge a newly allocated folio for swapin.
7334  * @folio: folio to charge.
7335  * @mm: mm context of the victim
7336  * @gfp: reclaim mode
7337  * @entry: swap entry for which the folio is allocated
7338  *
7339  * This function charges a folio allocated for swapin. Please call this before
7340  * adding the folio to the swapcache.
7341  *
7342  * Returns 0 on success. Otherwise, an error code is returned.
7343  */
7344 int mem_cgroup_swapin_charge_folio(struct folio *folio, struct mm_struct *mm,
7345 				  gfp_t gfp, swp_entry_t entry)
7346 {
7347 	struct mem_cgroup *memcg;
7348 	unsigned short id;
7349 	int ret;
7350 
7351 	if (mem_cgroup_disabled())
7352 		return 0;
7353 
7354 	id = lookup_swap_cgroup_id(entry);
7355 	rcu_read_lock();
7356 	memcg = mem_cgroup_from_id(id);
7357 	if (!memcg || !css_tryget_online(&memcg->css))
7358 		memcg = get_mem_cgroup_from_mm(mm);
7359 	rcu_read_unlock();
7360 
7361 	ret = charge_memcg(folio, memcg, gfp);
7362 
7363 	css_put(&memcg->css);
7364 	return ret;
7365 }
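
/*
 * Illustrative call order for a swapin (sketch only): charge before the
 * folio enters the swapcache, then drop the cgroup1 swap-slot duplicate:
 *
 *	if (mem_cgroup_swapin_charge_folio(folio, mm, gfp, entry))
 *		goto fail;
 *	... add the folio to the swap cache ...
 *	mem_cgroup_swapin_uncharge_swap(entry);
 */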
7366 
7367 /*
7368  * mem_cgroup_swapin_uncharge_swap - uncharge swap slot
7369  * @entry: swap entry for which the page is charged
7370  *
7371  * Call this function after successfully adding the charged page to swapcache.
7372  *
7373  * Note: This function assumes the page for which swap slot is being uncharged
7374  * is order 0 page.
7375  */
7376 void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry)
7377 {
7378 	/*
7379 	 * Cgroup1's unified memory+swap counter has been charged with the
7380 	 * new swapcache page, finish the transfer by uncharging the swap
7381 	 * slot. The swap slot would also get uncharged when it dies, but
7382 	 * it can stick around indefinitely and we'd count the page twice
7383 	 * the entire time.
7384 	 *
7385 	 * Cgroup2 has separate resource counters for memory and swap,
7386 	 * so this is a non-issue here. Memory and swap charge lifetimes
7387 	 * correspond 1:1 to page and swap slot lifetimes: we charge the
7388 	 * page to memory here, and uncharge swap when the slot is freed.
7389 	 */
7390 	if (!mem_cgroup_disabled() && do_memsw_account()) {
7391 		/*
7392 		 * The swap entry might not get freed for a long time,
7393 		 * let's not wait for it.  The page already received a
7394 		 * memory+swap charge, drop the swap entry duplicate.
7395 		 */
7396 		mem_cgroup_uncharge_swap(entry, 1);
7397 	}
7398 }
7399 
7400 struct uncharge_gather {
7401 	struct mem_cgroup *memcg;
7402 	unsigned long nr_memory;
7403 	unsigned long pgpgout;
7404 	unsigned long nr_kmem;
7405 	int nid;
7406 };
7407 
7408 static inline void uncharge_gather_clear(struct uncharge_gather *ug)
7409 {
7410 	memset(ug, 0, sizeof(*ug));
7411 }
7412 
7413 static void uncharge_batch(const struct uncharge_gather *ug)
7414 {
7415 	unsigned long flags;
7416 
7417 	if (ug->nr_memory) {
7418 		page_counter_uncharge(&ug->memcg->memory, ug->nr_memory);
7419 		if (do_memsw_account())
7420 			page_counter_uncharge(&ug->memcg->memsw, ug->nr_memory);
7421 		if (ug->nr_kmem)
7422 			memcg_account_kmem(ug->memcg, -ug->nr_kmem);
7423 		memcg_oom_recover(ug->memcg);
7424 	}
7425 
7426 	local_irq_save(flags);
7427 	__count_memcg_events(ug->memcg, PGPGOUT, ug->pgpgout);
7428 	__this_cpu_add(ug->memcg->vmstats_percpu->nr_page_events, ug->nr_memory);
7429 	memcg_check_events(ug->memcg, ug->nid);
7430 	local_irq_restore(flags);
7431 
7432 	/* drop reference from uncharge_folio */
7433 	css_put(&ug->memcg->css);
7434 }
7435 
7436 static void uncharge_folio(struct folio *folio, struct uncharge_gather *ug)
7437 {
7438 	long nr_pages;
7439 	struct mem_cgroup *memcg;
7440 	struct obj_cgroup *objcg;
7441 
7442 	VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
7443 
7444 	/*
7445 	 * Nobody should be changing or seriously looking at
7446 	 * folio memcg or objcg at this point, we have fully
7447 	 * exclusive access to the folio.
7448 	 */
7449 	if (folio_memcg_kmem(folio)) {
7450 		objcg = __folio_objcg(folio);
7451 		/*
7452 		 * This get matches the put at the end of the function and
7453 		 * kmem pages do not hold memcg references anymore.
7454 		 */
7455 		memcg = get_mem_cgroup_from_objcg(objcg);
7456 	} else {
7457 		memcg = __folio_memcg(folio);
7458 	}
7459 
7460 	if (!memcg)
7461 		return;
7462 
7463 	if (ug->memcg != memcg) {
7464 		if (ug->memcg) {
7465 			uncharge_batch(ug);
7466 			uncharge_gather_clear(ug);
7467 		}
7468 		ug->memcg = memcg;
7469 		ug->nid = folio_nid(folio);
7470 
7471 		/* pairs with css_put in uncharge_batch */
7472 		css_get(&memcg->css);
7473 	}
7474 
7475 	nr_pages = folio_nr_pages(folio);
7476 
7477 	if (folio_memcg_kmem(folio)) {
7478 		ug->nr_memory += nr_pages;
7479 		ug->nr_kmem += nr_pages;
7480 
7481 		folio->memcg_data = 0;
7482 		obj_cgroup_put(objcg);
7483 	} else {
7484 		/* LRU pages aren't accounted at the root level */
7485 		if (!mem_cgroup_is_root(memcg))
7486 			ug->nr_memory += nr_pages;
7487 		ug->pgpgout++;
7488 
7489 		folio->memcg_data = 0;
7490 	}
7491 
7492 	css_put(&memcg->css);
7493 }
7494 
7495 void __mem_cgroup_uncharge(struct folio *folio)
7496 {
7497 	struct uncharge_gather ug;
7498 
7499 	/* Don't touch folio->lru of any random page, pre-check: */
7500 	if (!folio_memcg(folio))
7501 		return;
7502 
7503 	uncharge_gather_clear(&ug);
7504 	uncharge_folio(folio, &ug);
7505 	uncharge_batch(&ug);
7506 }
7507 
7508 /**
7509  * __mem_cgroup_uncharge_list - uncharge a list of pages
7510  * @page_list: list of pages to uncharge
7511  *
7512  * Uncharge a list of pages previously charged with
7513  * __mem_cgroup_charge().
7514  */
7515 void __mem_cgroup_uncharge_list(struct list_head *page_list)
7516 {
7517 	struct uncharge_gather ug;
7518 	struct folio *folio;
7519 
7520 	uncharge_gather_clear(&ug);
7521 	list_for_each_entry(folio, page_list, lru)
7522 		uncharge_folio(folio, &ug);
7523 	if (ug.memcg)
7524 		uncharge_batch(&ug);
7525 }
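
/*
 * Batching illustration (added note): uncharge_folio() keeps accumulating
 * into the gather struct only while consecutive folios share a memcg; a
 * folio from a different memcg flushes the batch first. For a list charged
 * to memcgs A, A, A, B, A this results in three uncharge_batch() calls
 * (A x3, B x1, A x1) instead of five.
 */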
7526 
7527 /**
7528  * mem_cgroup_replace_folio - Charge a folio's replacement.
7529  * @old: Currently circulating folio.
7530  * @new: Replacement folio.
7531  *
7532  * Charge @new as a replacement folio for @old. @old will
7533  * be uncharged upon free. This is only used by the page cache
7534  * (in replace_page_cache_folio()).
7535  *
7536  * Both folios must be locked, @new->mapping must be set up.
7537  */
7538 void mem_cgroup_replace_folio(struct folio *old, struct folio *new)
7539 {
7540 	struct mem_cgroup *memcg;
7541 	long nr_pages = folio_nr_pages(new);
7542 	unsigned long flags;
7543 
7544 	VM_BUG_ON_FOLIO(!folio_test_locked(old), old);
7545 	VM_BUG_ON_FOLIO(!folio_test_locked(new), new);
7546 	VM_BUG_ON_FOLIO(folio_test_anon(old) != folio_test_anon(new), new);
7547 	VM_BUG_ON_FOLIO(folio_nr_pages(old) != nr_pages, new);
7548 
7549 	if (mem_cgroup_disabled())
7550 		return;
7551 
7552 	/* Page cache replacement: new folio already charged? */
7553 	if (folio_memcg(new))
7554 		return;
7555 
7556 	memcg = folio_memcg(old);
7557 	VM_WARN_ON_ONCE_FOLIO(!memcg, old);
7558 	if (!memcg)
7559 		return;
7560 
7561 	/* Force-charge the new page. The old one will be freed soon */
7562 	if (!mem_cgroup_is_root(memcg)) {
7563 		page_counter_charge(&memcg->memory, nr_pages);
7564 		if (do_memsw_account())
7565 			page_counter_charge(&memcg->memsw, nr_pages);
7566 	}
7567 
7568 	css_get(&memcg->css);
7569 	commit_charge(new, memcg);
7570 
7571 	local_irq_save(flags);
7572 	mem_cgroup_charge_statistics(memcg, nr_pages);
7573 	memcg_check_events(memcg, folio_nid(new));
7574 	local_irq_restore(flags);
7575 }
7576 
7577 /**
7578  * mem_cgroup_migrate - Transfer the memcg data from the old to the new folio.
7579  * @old: Currently circulating folio.
7580  * @new: Replacement folio.
7581  *
7582  * Transfer the memcg data from the old folio to the new folio for migration.
7583  * The old folio's data info will be cleared. Note that the memory counters
7584  * will remain unchanged throughout the process.
7585  *
7586  * Both folios must be locked, @new->mapping must be set up.
7587  */
7588 void mem_cgroup_migrate(struct folio *old, struct folio *new)
7589 {
7590 	struct mem_cgroup *memcg;
7591 
7592 	VM_BUG_ON_FOLIO(!folio_test_locked(old), old);
7593 	VM_BUG_ON_FOLIO(!folio_test_locked(new), new);
7594 	VM_BUG_ON_FOLIO(folio_test_anon(old) != folio_test_anon(new), new);
7595 	VM_BUG_ON_FOLIO(folio_nr_pages(old) != folio_nr_pages(new), new);
7596 
7597 	if (mem_cgroup_disabled())
7598 		return;
7599 
7600 	memcg = folio_memcg(old);
7601 	/*
7602 	 * Note that it is normal to see !memcg for a hugetlb folio.
7603 	 * For example, it could have been allocated when memory_hugetlb_accounting
7604 	 * was not selected.
7605 	 */
7606 	VM_WARN_ON_ONCE_FOLIO(!folio_test_hugetlb(old) && !memcg, old);
7607 	if (!memcg)
7608 		return;
7609 
7610 	/* Transfer the charge and the css ref */
7611 	commit_charge(new, memcg);
7612 	/*
7613 	 * If the old folio is a large folio and is in the split queue, it needs
7614 	 * to be removed from the split queue now, in case getting an incorrect
7615 	 * split queue in destroy_large_folio() after the memcg of the old folio
7616 	 * is cleared.
7617 	 *
7618 	 * In addition, the old folio is about to be freed after migration, so
7619 	 * removing from the split queue a bit earlier seems reasonable.
7620 	 */
7621 	if (folio_test_large(old) && folio_test_large_rmappable(old))
7622 		folio_undo_large_rmappable(old);
7623 	old->memcg_data = 0;
7624 }
7625 
7626 DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
7627 EXPORT_SYMBOL(memcg_sockets_enabled_key);
7628 
7629 void mem_cgroup_sk_alloc(struct sock *sk)
7630 {
7631 	struct mem_cgroup *memcg;
7632 
7633 	if (!mem_cgroup_sockets_enabled)
7634 		return;
7635 
7636 	/* Do not associate the sock with unrelated interrupted task's memcg. */
7637 	if (!in_task())
7638 		return;
7639 
7640 	rcu_read_lock();
7641 	memcg = mem_cgroup_from_task(current);
7642 	if (mem_cgroup_is_root(memcg))
7643 		goto out;
7644 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg->tcpmem_active)
7645 		goto out;
7646 	if (css_tryget(&memcg->css))
7647 		sk->sk_memcg = memcg;
7648 out:
7649 	rcu_read_unlock();
7650 }
7651 
7652 void mem_cgroup_sk_free(struct sock *sk)
7653 {
7654 	if (sk->sk_memcg)
7655 		css_put(&sk->sk_memcg->css);
7656 }
7657 
7658 /**
7659  * mem_cgroup_charge_skmem - charge socket memory
7660  * @memcg: memcg to charge
7661  * @nr_pages: number of pages to charge
7662  * @gfp_mask: reclaim mode
7663  *
7664  * Charges @nr_pages to @memcg. Returns %true if the charge fit within
7665  * @memcg's configured limit, %false if it doesn't.
7666  */
7667 bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages,
7668 			     gfp_t gfp_mask)
7669 {
7670 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
7671 		struct page_counter *fail;
7672 
7673 		if (page_counter_try_charge(&memcg->tcpmem, nr_pages, &fail)) {
7674 			memcg->tcpmem_pressure = 0;
7675 			return true;
7676 		}
7677 		memcg->tcpmem_pressure = 1;
7678 		if (gfp_mask & __GFP_NOFAIL) {
7679 			page_counter_charge(&memcg->tcpmem, nr_pages);
7680 			return true;
7681 		}
7682 		return false;
7683 	}
7684 
7685 	if (try_charge(memcg, gfp_mask, nr_pages) == 0) {
7686 		mod_memcg_state(memcg, MEMCG_SOCK, nr_pages);
7687 		return true;
7688 	}
7689 
7690 	return false;
7691 }
7692 
7693 /**
7694  * mem_cgroup_uncharge_skmem - uncharge socket memory
7695  * @memcg: memcg to uncharge
7696  * @nr_pages: number of pages to uncharge
7697  */
7698 void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
7699 {
7700 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
7701 		page_counter_uncharge(&memcg->tcpmem, nr_pages);
7702 		return;
7703 	}
7704 
7705 	mod_memcg_state(memcg, MEMCG_SOCK, -nr_pages);
7706 
7707 	refill_stock(memcg, nr_pages);
7708 }
7709 
7710 static int __init cgroup_memory(char *s)
7711 {
7712 	char *token;
7713 
7714 	while ((token = strsep(&s, ",")) != NULL) {
7715 		if (!*token)
7716 			continue;
7717 		if (!strcmp(token, "nosocket"))
7718 			cgroup_memory_nosocket = true;
7719 		if (!strcmp(token, "nokmem"))
7720 			cgroup_memory_nokmem = true;
7721 		if (!strcmp(token, "nobpf"))
7722 			cgroup_memory_nobpf = true;
7723 	}
7724 	return 1;
7725 }
7726 __setup("cgroup.memory=", cgroup_memory);
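
/*
 * Example (boot command line, illustrative): "cgroup.memory=nosocket,nokmem"
 * disables socket and kernel memory accounting; tokens not recognized by the
 * loop above are silently ignored.
 */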
7727 
7728 /*
7729  * subsys_initcall() for memory controller.
7730  *
7731  * Some parts like memcg_hotplug_cpu_dead() have to be initialized from this
7732  * context because of lock dependencies (cgroup_lock -> cpu hotplug) but
7733  * basically everything that doesn't depend on a specific mem_cgroup structure
7734  * should be initialized from here.
7735  */
7736 static int __init mem_cgroup_init(void)
7737 {
7738 	int cpu, node;
7739 
7740 	/*
7741 	 * Currently s32 type (can refer to struct batched_lruvec_stat) is
7742 	 * used for per-memcg-per-cpu caching of per-node statistics. In order
7743 	 * to work fine, we should make sure that the overfill threshold can't
7744 	 * exceed S32_MAX / PAGE_SIZE.
7745 	 */
7746 	BUILD_BUG_ON(MEMCG_CHARGE_BATCH > S32_MAX / PAGE_SIZE);
7747 
7748 	cpuhp_setup_state_nocalls(CPUHP_MM_MEMCQ_DEAD, "mm/memctrl:dead", NULL,
7749 				  memcg_hotplug_cpu_dead);
7750 
7751 	for_each_possible_cpu(cpu)
7752 		INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work,
7753 			  drain_local_stock);
7754 
7755 	for_each_node(node) {
7756 		struct mem_cgroup_tree_per_node *rtpn;
7757 
7758 		rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL, node);
7759 
7760 		rtpn->rb_root = RB_ROOT;
7761 		rtpn->rb_rightmost = NULL;
7762 		spin_lock_init(&rtpn->lock);
7763 		soft_limit_tree.rb_tree_per_node[node] = rtpn;
7764 	}
7765 
7766 	return 0;
7767 }
7768 subsys_initcall(mem_cgroup_init);
7769 
7770 #ifdef CONFIG_SWAP
7771 static struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg)
7772 {
7773 	while (!refcount_inc_not_zero(&memcg->id.ref)) {
7774 		/*
7775 		 * The root cgroup cannot be destroyed, so it's refcount must
7776 		 * always be >= 1.
7777 		 */
7778 		if (WARN_ON_ONCE(mem_cgroup_is_root(memcg))) {
7779 			VM_BUG_ON(1);
7780 			break;
7781 		}
7782 		memcg = parent_mem_cgroup(memcg);
7783 		if (!memcg)
7784 			memcg = root_mem_cgroup;
7785 	}
7786 	return memcg;
7787 }
7788 
7789 /**
7790  * mem_cgroup_swapout - transfer a memsw charge to swap
7791  * @folio: folio whose memsw charge to transfer
7792  * @entry: swap entry to move the charge to
7793  *
7794  * Transfer the memsw charge of @folio to @entry.
7795  */
7796 void mem_cgroup_swapout(struct folio *folio, swp_entry_t entry)
7797 {
7798 	struct mem_cgroup *memcg, *swap_memcg;
7799 	unsigned int nr_entries;
7800 	unsigned short oldid;
7801 
7802 	VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
7803 	VM_BUG_ON_FOLIO(folio_ref_count(folio), folio);
7804 
7805 	if (mem_cgroup_disabled())
7806 		return;
7807 
7808 	if (!do_memsw_account())
7809 		return;
7810 
7811 	memcg = folio_memcg(folio);
7812 
7813 	VM_WARN_ON_ONCE_FOLIO(!memcg, folio);
7814 	if (!memcg)
7815 		return;
7816 
7817 	/*
7818 	 * In case the memcg owning these pages has been offlined and doesn't
7819 	 * have an ID allocated to it anymore, charge the closest online
7820 	 * ancestor for the swap instead and transfer the memory+swap charge.
7821 	 */
7822 	swap_memcg = mem_cgroup_id_get_online(memcg);
7823 	nr_entries = folio_nr_pages(folio);
7824 	/* Get references for the tail pages, too */
7825 	if (nr_entries > 1)
7826 		mem_cgroup_id_get_many(swap_memcg, nr_entries - 1);
7827 	oldid = swap_cgroup_record(entry, mem_cgroup_id(swap_memcg),
7828 				   nr_entries);
7829 	VM_BUG_ON_FOLIO(oldid, folio);
7830 	mod_memcg_state(swap_memcg, MEMCG_SWAP, nr_entries);
7831 
7832 	folio->memcg_data = 0;
7833 
7834 	if (!mem_cgroup_is_root(memcg))
7835 		page_counter_uncharge(&memcg->memory, nr_entries);
7836 
7837 	if (memcg != swap_memcg) {
7838 		if (!mem_cgroup_is_root(swap_memcg))
7839 			page_counter_charge(&swap_memcg->memsw, nr_entries);
7840 		page_counter_uncharge(&memcg->memsw, nr_entries);
7841 	}
7842 
7843 	/*
7844 	 * Interrupts should be disabled here because the caller holds the
7845 	 * i_pages lock which is taken with interrupts-off. It is
7846 	 * important here to have the interrupts disabled because it is the
7847 	 * only synchronisation we have for updating the per-CPU variables.
7848 	 */
7849 	memcg_stats_lock();
7850 	mem_cgroup_charge_statistics(memcg, -nr_entries);
7851 	memcg_stats_unlock();
7852 	memcg_check_events(memcg, folio_nid(folio));
7853 
7854 	css_put(&memcg->css);
7855 }
7856 
7857 /**
7858  * __mem_cgroup_try_charge_swap - try charging swap space for a folio
7859  * @folio: folio being added to swap
7860  * @entry: swap entry to charge
7861  *
7862  * Try to charge @folio's memcg for the swap space at @entry.
7863  *
7864  * Returns 0 on success, -ENOMEM on failure.
7865  */
7866 int __mem_cgroup_try_charge_swap(struct folio *folio, swp_entry_t entry)
7867 {
7868 	unsigned int nr_pages = folio_nr_pages(folio);
7869 	struct page_counter *counter;
7870 	struct mem_cgroup *memcg;
7871 	unsigned short oldid;
7872 
7873 	if (do_memsw_account())
7874 		return 0;
7875 
7876 	memcg = folio_memcg(folio);
7877 
7878 	VM_WARN_ON_ONCE_FOLIO(!memcg, folio);
7879 	if (!memcg)
7880 		return 0;
7881 
7882 	if (!entry.val) {
7883 		memcg_memory_event(memcg, MEMCG_SWAP_FAIL);
7884 		return 0;
7885 	}
7886 
7887 	memcg = mem_cgroup_id_get_online(memcg);
7888 
7889 	if (!mem_cgroup_is_root(memcg) &&
7890 	    !page_counter_try_charge(&memcg->swap, nr_pages, &counter)) {
7891 		memcg_memory_event(memcg, MEMCG_SWAP_MAX);
7892 		memcg_memory_event(memcg, MEMCG_SWAP_FAIL);
7893 		mem_cgroup_id_put(memcg);
7894 		return -ENOMEM;
7895 	}
7896 
7897 	/* Get references for the tail pages, too */
7898 	if (nr_pages > 1)
7899 		mem_cgroup_id_get_many(memcg, nr_pages - 1);
7900 	oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg), nr_pages);
7901 	VM_BUG_ON_FOLIO(oldid, folio);
7902 	mod_memcg_state(memcg, MEMCG_SWAP, nr_pages);
7903 
7904 	return 0;
7905 }
7906 
7907 /**
7908  * __mem_cgroup_uncharge_swap - uncharge swap space
7909  * @entry: swap entry to uncharge
7910  * @nr_pages: the amount of swap space to uncharge
7911  */
7912 void __mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages)
7913 {
7914 	struct mem_cgroup *memcg;
7915 	unsigned short id;
7916 
7917 	id = swap_cgroup_record(entry, 0, nr_pages);
7918 	rcu_read_lock();
7919 	memcg = mem_cgroup_from_id(id);
7920 	if (memcg) {
7921 		if (!mem_cgroup_is_root(memcg)) {
7922 			if (do_memsw_account())
7923 				page_counter_uncharge(&memcg->memsw, nr_pages);
7924 			else
7925 				page_counter_uncharge(&memcg->swap, nr_pages);
7926 		}
7927 		mod_memcg_state(memcg, MEMCG_SWAP, -nr_pages);
7928 		mem_cgroup_id_put_many(memcg, nr_pages);
7929 	}
7930 	rcu_read_unlock();
7931 }
7932 
7933 long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
7934 {
7935 	long nr_swap_pages = get_nr_swap_pages();
7936 
7937 	if (mem_cgroup_disabled() || do_memsw_account())
7938 		return nr_swap_pages;
7939 	for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg))
7940 		nr_swap_pages = min_t(long, nr_swap_pages,
7941 				      READ_ONCE(memcg->swap.max) -
7942 				      page_counter_read(&memcg->swap));
7943 	return nr_swap_pages;
7944 }
7945 
7946 bool mem_cgroup_swap_full(struct folio *folio)
7947 {
7948 	struct mem_cgroup *memcg;
7949 
7950 	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
7951 
7952 	if (vm_swap_full())
7953 		return true;
7954 	if (do_memsw_account())
7955 		return false;
7956 
7957 	memcg = folio_memcg(folio);
7958 	if (!memcg)
7959 		return false;
7960 
7961 	for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg)) {
7962 		unsigned long usage = page_counter_read(&memcg->swap);
7963 
7964 		if (usage * 2 >= READ_ONCE(memcg->swap.high) ||
7965 		    usage * 2 >= READ_ONCE(memcg->swap.max))
7966 			return true;
7967 	}
7968 
7969 	return false;
7970 }
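
/*
 * Worked example (illustrative numbers): with memory.swap.max set to 1G and
 * swap.high left at "max", the "usage * 2 >= max" check above reports the
 * cgroup's swap as full once usage reaches 512M, i.e. at 50% of the limit,
 * mirroring the global vm_swap_full() heuristic.
 */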
7971 
7972 static int __init setup_swap_account(char *s)
7973 {
7974 	bool res;
7975 
7976 	if (!kstrtobool(s, &res) && !res)
7977 		pr_warn_once("The swapaccount=0 commandline option is deprecated "
7978 			     "in favor of configuring swap control via cgroupfs. "
7979 			     "Please report your usecase to linux-mm@kvack.org if you "
7980 			     "depend on this functionality.\n");
7981 	return 1;
7982 }
7983 __setup("swapaccount=", setup_swap_account);
7984 
7985 static u64 swap_current_read(struct cgroup_subsys_state *css,
7986 			     struct cftype *cft)
7987 {
7988 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
7989 
7990 	return (u64)page_counter_read(&memcg->swap) * PAGE_SIZE;
7991 }
7992 
7993 static u64 swap_peak_read(struct cgroup_subsys_state *css,
7994 			  struct cftype *cft)
7995 {
7996 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
7997 
7998 	return (u64)memcg->swap.watermark * PAGE_SIZE;
7999 }
8000 
8001 static int swap_high_show(struct seq_file *m, void *v)
8002 {
8003 	return seq_puts_memcg_tunable(m,
8004 		READ_ONCE(mem_cgroup_from_seq(m)->swap.high));
8005 }
8006 
8007 static ssize_t swap_high_write(struct kernfs_open_file *of,
8008 			       char *buf, size_t nbytes, loff_t off)
8009 {
8010 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
8011 	unsigned long high;
8012 	int err;
8013 
8014 	buf = strstrip(buf);
8015 	err = page_counter_memparse(buf, "max", &high);
8016 	if (err)
8017 		return err;
8018 
8019 	page_counter_set_high(&memcg->swap, high);
8020 
8021 	return nbytes;
8022 }
8023 
8024 static int swap_max_show(struct seq_file *m, void *v)
8025 {
8026 	return seq_puts_memcg_tunable(m,
8027 		READ_ONCE(mem_cgroup_from_seq(m)->swap.max));
8028 }
8029 
8030 static ssize_t swap_max_write(struct kernfs_open_file *of,
8031 			      char *buf, size_t nbytes, loff_t off)
8032 {
8033 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
8034 	unsigned long max;
8035 	int err;
8036 
8037 	buf = strstrip(buf);
8038 	err = page_counter_memparse(buf, "max", &max);
8039 	if (err)
8040 		return err;
8041 
8042 	xchg(&memcg->swap.max, max);
8043 
8044 	return nbytes;
8045 }
8046 
8047 static int swap_events_show(struct seq_file *m, void *v)
8048 {
8049 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
8050 
8051 	seq_printf(m, "high %lu\n",
8052 		   atomic_long_read(&memcg->memory_events[MEMCG_SWAP_HIGH]));
8053 	seq_printf(m, "max %lu\n",
8054 		   atomic_long_read(&memcg->memory_events[MEMCG_SWAP_MAX]));
8055 	seq_printf(m, "fail %lu\n",
8056 		   atomic_long_read(&memcg->memory_events[MEMCG_SWAP_FAIL]));
8057 
8058 	return 0;
8059 }
8060 
8061 static struct cftype swap_files[] = {
8062 	{
8063 		.name = "swap.current",
8064 		.flags = CFTYPE_NOT_ON_ROOT,
8065 		.read_u64 = swap_current_read,
8066 	},
8067 	{
8068 		.name = "swap.high",
8069 		.flags = CFTYPE_NOT_ON_ROOT,
8070 		.seq_show = swap_high_show,
8071 		.write = swap_high_write,
8072 	},
8073 	{
8074 		.name = "swap.max",
8075 		.flags = CFTYPE_NOT_ON_ROOT,
8076 		.seq_show = swap_max_show,
8077 		.write = swap_max_write,
8078 	},
8079 	{
8080 		.name = "swap.peak",
8081 		.flags = CFTYPE_NOT_ON_ROOT,
8082 		.read_u64 = swap_peak_read,
8083 	},
8084 	{
8085 		.name = "swap.events",
8086 		.flags = CFTYPE_NOT_ON_ROOT,
8087 		.file_offset = offsetof(struct mem_cgroup, swap_events_file),
8088 		.seq_show = swap_events_show,
8089 	},
8090 	{ }	/* terminate */
8091 };
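
/*
 * Interface example (cgroup v2, illustrative paths): the files above show up
 * under each non-root cgroup, e.g.
 *
 *	echo 512M > /sys/fs/cgroup/mygroup/memory.swap.max
 *	cat /sys/fs/cgroup/mygroup/memory.swap.current
 *
 * Writes are parsed by page_counter_memparse(), so "max" is accepted as well
 * as plain byte values.
 */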
8092 
8093 static struct cftype memsw_files[] = {
8094 	{
8095 		.name = "memsw.usage_in_bytes",
8096 		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
8097 		.read_u64 = mem_cgroup_read_u64,
8098 	},
8099 	{
8100 		.name = "memsw.max_usage_in_bytes",
8101 		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
8102 		.write = mem_cgroup_reset,
8103 		.read_u64 = mem_cgroup_read_u64,
8104 	},
8105 	{
8106 		.name = "memsw.limit_in_bytes",
8107 		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
8108 		.write = mem_cgroup_write,
8109 		.read_u64 = mem_cgroup_read_u64,
8110 	},
8111 	{
8112 		.name = "memsw.failcnt",
8113 		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
8114 		.write = mem_cgroup_reset,
8115 		.read_u64 = mem_cgroup_read_u64,
8116 	},
8117 	{ },	/* terminate */
8118 };
8119 
8120 #if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
8121 /**
8122  * obj_cgroup_may_zswap - check if this cgroup can zswap
8123  * @objcg: the object cgroup
8124  *
8125  * Check if the hierarchical zswap limit has been reached.
8126  *
8127  * This doesn't check for specific headroom, and it is not atomic
8128  * either. But with zswap, the size of the allocation is only known
8129  * once compression has occurred, and this optimistic pre-check avoids
8130  * spending cycles on compression when there is already no room left
8131  * or zswap is disabled altogether somewhere in the hierarchy.
8132  */
8133 bool obj_cgroup_may_zswap(struct obj_cgroup *objcg)
8134 {
8135 	struct mem_cgroup *memcg, *original_memcg;
8136 	bool ret = true;
8137 
8138 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
8139 		return true;
8140 
8141 	original_memcg = get_mem_cgroup_from_objcg(objcg);
8142 	for (memcg = original_memcg; !mem_cgroup_is_root(memcg);
8143 	     memcg = parent_mem_cgroup(memcg)) {
8144 		unsigned long max = READ_ONCE(memcg->zswap_max);
8145 		unsigned long pages;
8146 
8147 		if (max == PAGE_COUNTER_MAX)
8148 			continue;
8149 		if (max == 0) {
8150 			ret = false;
8151 			break;
8152 		}
8153 
8154 		/*
8155 		 * mem_cgroup_flush_stats() ignores small changes. Use
8156 		 * do_flush_stats() directly to get accurate stats for charging.
8157 		 */
8158 		do_flush_stats(memcg);
8159 		pages = memcg_page_state(memcg, MEMCG_ZSWAP_B) / PAGE_SIZE;
8160 		if (pages < max)
8161 			continue;
8162 		ret = false;
8163 		break;
8164 	}
8165 	mem_cgroup_put(original_memcg);
8166 	return ret;
8167 }
8168 
8169 /**
8170  * obj_cgroup_charge_zswap - charge compression backend memory
8171  * @objcg: the object cgroup
8172  * @size: size of compressed object
8173  *
8174  * This forces the charge after obj_cgroup_may_zswap() allowed
8175  * compression and storage in zswap for this cgroup to go ahead.
8176  */
8177 void obj_cgroup_charge_zswap(struct obj_cgroup *objcg, size_t size)
8178 {
8179 	struct mem_cgroup *memcg;
8180 
8181 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
8182 		return;
8183 
8184 	VM_WARN_ON_ONCE(!(current->flags & PF_MEMALLOC));
8185 
8186 	/* PF_MEMALLOC context, charging must succeed */
8187 	if (obj_cgroup_charge(objcg, GFP_KERNEL, size))
8188 		VM_WARN_ON_ONCE(1);
8189 
8190 	rcu_read_lock();
8191 	memcg = obj_cgroup_memcg(objcg);
8192 	mod_memcg_state(memcg, MEMCG_ZSWAP_B, size);
8193 	mod_memcg_state(memcg, MEMCG_ZSWAPPED, 1);
8194 	rcu_read_unlock();
8195 }
8196 
8197 /**
8198  * obj_cgroup_uncharge_zswap - uncharge compression backend memory
8199  * @objcg: the object cgroup
8200  * @size: size of compressed object
8201  *
8202  * Uncharges zswap memory on page in.
8203  */
8204 void obj_cgroup_uncharge_zswap(struct obj_cgroup *objcg, size_t size)
8205 {
8206 	struct mem_cgroup *memcg;
8207 
8208 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
8209 		return;
8210 
8211 	obj_cgroup_uncharge(objcg, size);
8212 
8213 	rcu_read_lock();
8214 	memcg = obj_cgroup_memcg(objcg);
8215 	mod_memcg_state(memcg, MEMCG_ZSWAP_B, -size);
8216 	mod_memcg_state(memcg, MEMCG_ZSWAPPED, -1);
8217 	rcu_read_unlock();
8218 }
8219 
8220 bool mem_cgroup_zswap_writeback_enabled(struct mem_cgroup *memcg)
8221 {
8222 	/* if zswap is disabled, do not block pages going to the swapping device */
8223 	return !is_zswap_enabled() || !memcg || READ_ONCE(memcg->zswap_writeback);
8224 }
8225 
8226 static u64 zswap_current_read(struct cgroup_subsys_state *css,
8227 			      struct cftype *cft)
8228 {
8229 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
8230 
8231 	mem_cgroup_flush_stats(memcg);
8232 	return memcg_page_state(memcg, MEMCG_ZSWAP_B);
8233 }
8234 
8235 static int zswap_max_show(struct seq_file *m, void *v)
8236 {
8237 	return seq_puts_memcg_tunable(m,
8238 		READ_ONCE(mem_cgroup_from_seq(m)->zswap_max));
8239 }
8240 
8241 static ssize_t zswap_max_write(struct kernfs_open_file *of,
8242 			       char *buf, size_t nbytes, loff_t off)
8243 {
8244 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
8245 	unsigned long max;
8246 	int err;
8247 
8248 	buf = strstrip(buf);
8249 	err = page_counter_memparse(buf, "max", &max);
8250 	if (err)
8251 		return err;
8252 
8253 	xchg(&memcg->zswap_max, max);
8254 
8255 	return nbytes;
8256 }
8257 
8258 static int zswap_writeback_show(struct seq_file *m, void *v)
8259 {
8260 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
8261 
8262 	seq_printf(m, "%d\n", READ_ONCE(memcg->zswap_writeback));
8263 	return 0;
8264 }
8265 
8266 static ssize_t zswap_writeback_write(struct kernfs_open_file *of,
8267 				char *buf, size_t nbytes, loff_t off)
8268 {
8269 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
8270 	int zswap_writeback;
8271 	ssize_t parse_ret = kstrtoint(strstrip(buf), 0, &zswap_writeback);
8272 
8273 	if (parse_ret)
8274 		return parse_ret;
8275 
8276 	if (zswap_writeback != 0 && zswap_writeback != 1)
8277 		return -EINVAL;
8278 
8279 	WRITE_ONCE(memcg->zswap_writeback, zswap_writeback);
8280 	return nbytes;
8281 }
8282 
8283 static struct cftype zswap_files[] = {
8284 	{
8285 		.name = "zswap.current",
8286 		.flags = CFTYPE_NOT_ON_ROOT,
8287 		.read_u64 = zswap_current_read,
8288 	},
8289 	{
8290 		.name = "zswap.max",
8291 		.flags = CFTYPE_NOT_ON_ROOT,
8292 		.seq_show = zswap_max_show,
8293 		.write = zswap_max_write,
8294 	},
8295 	{
8296 		.name = "zswap.writeback",
8297 		.seq_show = zswap_writeback_show,
8298 		.write = zswap_writeback_write,
8299 	},
8300 	{ }	/* terminate */
8301 };
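
/*
 * Interface example (illustrative paths): zswap usage can be capped and its
 * writeback disabled per cgroup, e.g.
 *
 *	echo 100M > /sys/fs/cgroup/mygroup/memory.zswap.max
 *	echo 0 > /sys/fs/cgroup/mygroup/memory.zswap.writeback
 *
 * zswap_writeback_write() above accepts only 0 or 1.
 */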
8302 #endif /* CONFIG_MEMCG_KMEM && CONFIG_ZSWAP */
8303 
8304 static int __init mem_cgroup_swap_init(void)
8305 {
8306 	if (mem_cgroup_disabled())
8307 		return 0;
8308 
8309 	WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys, swap_files));
8310 	WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys, memsw_files));
8311 #if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
8312 	WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys, zswap_files));
8313 #endif
8314 	return 0;
8315 }
8316 subsys_initcall(mem_cgroup_swap_init);
8317 
8318 #endif /* CONFIG_SWAP */
8319