xref: /linux/mm/memcontrol.c (revision 442d87c7db9e9e2a569a49d38f404b8b556b8719)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /* memcontrol.c - Memory Controller
3  *
4  * Copyright IBM Corporation, 2007
5  * Author Balbir Singh <balbir@linux.vnet.ibm.com>
6  *
7  * Copyright 2007 OpenVZ SWsoft Inc
8  * Author: Pavel Emelianov <xemul@openvz.org>
9  *
10  * Memory thresholds
11  * Copyright (C) 2009 Nokia Corporation
12  * Author: Kirill A. Shutemov
13  *
14  * Kernel Memory Controller
15  * Copyright (C) 2012 Parallels Inc. and Google Inc.
16  * Authors: Glauber Costa and Suleiman Souhlal
17  *
18  * Native page reclaim
19  * Charge lifetime sanitation
20  * Lockless page tracking & accounting
21  * Unified hierarchy configuration model
22  * Copyright (C) 2015 Red Hat, Inc., Johannes Weiner
23  *
24  * Per memcg lru locking
25  * Copyright (C) 2020 Alibaba, Inc, Alex Shi
26  */
27 
28 #include <linux/cgroup-defs.h>
29 #include <linux/page_counter.h>
30 #include <linux/memcontrol.h>
31 #include <linux/cgroup.h>
32 #include <linux/cpuset.h>
33 #include <linux/sched/mm.h>
34 #include <linux/shmem_fs.h>
35 #include <linux/hugetlb.h>
36 #include <linux/pagemap.h>
37 #include <linux/folio_batch.h>
38 #include <linux/vm_event_item.h>
39 #include <linux/smp.h>
40 #include <linux/page-flags.h>
41 #include <linux/backing-dev.h>
42 #include <linux/bit_spinlock.h>
43 #include <linux/rcupdate.h>
44 #include <linux/limits.h>
45 #include <linux/export.h>
46 #include <linux/list.h>
47 #include <linux/mutex.h>
48 #include <linux/rbtree.h>
49 #include <linux/slab.h>
50 #include <linux/swapops.h>
51 #include <linux/spinlock.h>
52 #include <linux/fs.h>
53 #include <linux/seq_file.h>
54 #include <linux/vmpressure.h>
55 #include <linux/memremap.h>
56 #include <linux/mm_inline.h>
57 #include <linux/swap_cgroup.h>
58 #include <linux/cpu.h>
59 #include <linux/oom.h>
60 #include <linux/lockdep.h>
61 #include <linux/resume_user_mode.h>
62 #include <linux/psi.h>
63 #include <linux/seq_buf.h>
64 #include <linux/sched/isolation.h>
65 #include <linux/kmemleak.h>
66 #include "internal.h"
67 #include <net/sock.h>
68 #include <net/ip.h>
69 #include "slab.h"
70 #include "memcontrol-v1.h"
71 
72 #include <linux/uaccess.h>
73 
74 #define CREATE_TRACE_POINTS
75 #include <trace/events/memcg.h>
76 #undef CREATE_TRACE_POINTS
77 
78 #include <trace/events/vmscan.h>
79 
80 struct cgroup_subsys memory_cgrp_subsys __read_mostly;
81 EXPORT_SYMBOL(memory_cgrp_subsys);
82 
83 struct mem_cgroup *root_mem_cgroup __read_mostly;
84 EXPORT_SYMBOL(root_mem_cgroup);
85 
86 /* Active memory cgroup to use from an interrupt context */
87 DEFINE_PER_CPU(struct mem_cgroup *, int_active_memcg);
88 EXPORT_PER_CPU_SYMBOL_GPL(int_active_memcg);
89 
90 /* Socket memory accounting disabled? */
91 static bool cgroup_memory_nosocket __ro_after_init;
92 
93 /* Kernel memory accounting disabled? */
94 static bool cgroup_memory_nokmem __ro_after_init;
95 
96 /* BPF memory accounting disabled? */
97 static bool cgroup_memory_nobpf __ro_after_init;
98 
99 static struct workqueue_struct *memcg_wq __ro_after_init;
100 
101 static struct kmem_cache *memcg_cachep;
102 static struct kmem_cache *memcg_pn_cachep;
103 
104 #ifdef CONFIG_CGROUP_WRITEBACK
105 static DECLARE_WAIT_QUEUE_HEAD(memcg_cgwb_frn_waitq);
106 #endif
107 
108 static inline bool task_is_dying(void)
109 {
110 	return tsk_is_oom_victim(current) || fatal_signal_pending(current) ||
111 		(current->flags & PF_EXITING);
112 }
113 
114 /* Some nice accessors for the vmpressure. */
115 struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
116 {
117 	if (!memcg)
118 		memcg = root_mem_cgroup;
119 	return &memcg->vmpressure;
120 }
121 
122 struct mem_cgroup *vmpressure_to_memcg(struct vmpressure *vmpr)
123 {
124 	return container_of(vmpr, struct mem_cgroup, vmpressure);
125 }
126 
127 #define SEQ_BUF_SIZE SZ_4K
128 #define CURRENT_OBJCG_UPDATE_BIT 0
129 #define CURRENT_OBJCG_UPDATE_FLAG (1UL << CURRENT_OBJCG_UPDATE_BIT)
130 
131 static DEFINE_SPINLOCK(objcg_lock);
132 
133 bool mem_cgroup_kmem_disabled(void)
134 {
135 	return cgroup_memory_nokmem;
136 }
137 
138 static void memcg_uncharge(struct mem_cgroup *memcg, unsigned int nr_pages);
139 
140 static void obj_cgroup_release(struct percpu_ref *ref)
141 {
142 	struct obj_cgroup *objcg = container_of(ref, struct obj_cgroup, refcnt);
143 	unsigned int nr_bytes;
144 	unsigned int nr_pages;
145 	unsigned long flags;
146 
147 	/*
148 	 * At this point all allocated objects are freed, and
149 	 * objcg->nr_charged_bytes can't have an arbitrary byte value.
150 	 * However, it can be PAGE_SIZE or (x * PAGE_SIZE).
151 	 *
152 	 * The following sequence can lead to it:
153 	 * 1) CPU0: objcg == stock->cached_objcg
154 	 * 2) CPU1: we do a small allocation (e.g. 92 bytes),
155 	 *          PAGE_SIZE bytes are charged
156 	 * 3) CPU1: a process from another memcg is allocating something,
157 	 * 3) CPU1: a process from another memcg is allocating something,
158 	 *          the stock is flushed,
159 	 *          objcg->nr_charged_bytes = PAGE_SIZE - 92
160 	 * 4) CPU0: we release this object,
161 	 *          92 bytes are added to stock->nr_bytes
162 	 * 5) CPU0: the stock is flushed,
163 	 *          92 bytes are added to objcg->nr_charged_bytes
164 	 *
165 	 * As a result, nr_charged_bytes == PAGE_SIZE.
166 	 */
167 	nr_bytes = atomic_read(&objcg->nr_charged_bytes);
168 	WARN_ON_ONCE(nr_bytes & (PAGE_SIZE - 1));
169 	nr_pages = nr_bytes >> PAGE_SHIFT;
170 
171 	if (nr_pages) {
172 		struct mem_cgroup *memcg;
173 
174 		memcg = get_mem_cgroup_from_objcg(objcg);
175 		mod_memcg_state(memcg, MEMCG_KMEM, -nr_pages);
176 		memcg1_account_kmem(memcg, -nr_pages);
177 		if (!mem_cgroup_is_root(memcg))
178 			memcg_uncharge(memcg, nr_pages);
179 		mem_cgroup_put(memcg);
180 	}
181 
182 	spin_lock_irqsave(&objcg_lock, flags);
183 	list_del(&objcg->list);
184 	spin_unlock_irqrestore(&objcg_lock, flags);
185 
186 	percpu_ref_exit(ref);
187 	kfree_rcu(objcg, rcu);
188 }
189 
190 static struct obj_cgroup *obj_cgroup_alloc(void)
191 {
192 	struct obj_cgroup *objcg;
193 	int ret;
194 
195 	objcg = kzalloc_obj(struct obj_cgroup);
196 	if (!objcg)
197 		return NULL;
198 
199 	ret = percpu_ref_init(&objcg->refcnt, obj_cgroup_release, 0,
200 			      GFP_KERNEL);
201 	if (ret) {
202 		kfree(objcg);
203 		return NULL;
204 	}
205 	INIT_LIST_HEAD(&objcg->list);
206 	return objcg;
207 }
208 
209 static void memcg_reparent_objcgs(struct mem_cgroup *memcg,
210 				  struct mem_cgroup *parent)
211 {
212 	struct obj_cgroup *objcg, *iter;
213 
214 	objcg = rcu_replace_pointer(memcg->objcg, NULL, true);
215 
216 	spin_lock_irq(&objcg_lock);
217 
218 	/* 1) Ready to reparent active objcg. */
219 	list_add(&objcg->list, &memcg->objcg_list);
220 	/* 2) Reparent active objcg and already reparented objcgs to parent. */
221 	list_for_each_entry(iter, &memcg->objcg_list, list)
222 		WRITE_ONCE(iter->memcg, parent);
223 	/* 3) Move already reparented objcgs to the parent's list */
224 	list_splice(&memcg->objcg_list, &parent->objcg_list);
225 
226 	spin_unlock_irq(&objcg_lock);
227 
228 	percpu_ref_kill(&objcg->refcnt);
229 }
230 
231 /*
232  * A lot of the calls to the cache allocation functions are expected to be
233  * inlined by the compiler. Since the calls to memcg_slab_post_alloc_hook() are
234  * conditional on this static branch, we have to allow modules that do
235  * kmem_cache_alloc and the like to see this symbol as well.
236  */
237 DEFINE_STATIC_KEY_FALSE(memcg_kmem_online_key);
238 EXPORT_SYMBOL(memcg_kmem_online_key);
239 
240 DEFINE_STATIC_KEY_FALSE(memcg_bpf_enabled_key);
241 EXPORT_SYMBOL(memcg_bpf_enabled_key);
242 
243 /**
244  * mem_cgroup_css_from_folio - css of the memcg associated with a folio
245  * @folio: folio of interest
246  *
247  * If memcg is bound to the default hierarchy, css of the memcg associated
248  * with @folio is returned.  The returned css remains associated with @folio
249  * until it is released.
250  *
251  * If memcg is bound to a traditional hierarchy, the css of root_mem_cgroup
252  * is returned.
253  */
254 struct cgroup_subsys_state *mem_cgroup_css_from_folio(struct folio *folio)
255 {
256 	struct mem_cgroup *memcg = folio_memcg(folio);
257 
258 	if (!memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
259 		memcg = root_mem_cgroup;
260 
261 	return &memcg->css;
262 }
263 
264 /**
265  * page_cgroup_ino - return inode number of the memcg a page is charged to
266  * @page: the page
267  *
268  * Look up the closest online ancestor of the memory cgroup @page is charged to
269  * and return its inode number or 0 if @page is not charged to any cgroup. It
270  * is safe to call this function without holding a reference to @page.
271  *
272  * Note, this function is inherently racy, because there is nothing to prevent
273  * the cgroup inode from getting torn down and potentially reallocated a moment
274  * after page_cgroup_ino() returns, so it only should be used by callers that
275  * do not care (such as procfs interfaces).
276  */
277 ino_t page_cgroup_ino(struct page *page)
278 {
279 	struct mem_cgroup *memcg;
280 	unsigned long ino = 0;
281 
282 	rcu_read_lock();
283 	/* page_folio() is racy here, but the entire function is racy anyway */
284 	memcg = folio_memcg_check(page_folio(page));
285 
286 	while (memcg && !css_is_online(&memcg->css))
287 		memcg = parent_mem_cgroup(memcg);
288 	if (memcg)
289 		ino = cgroup_ino(memcg->css.cgroup);
290 	rcu_read_unlock();
291 	return ino;
292 }
293 EXPORT_SYMBOL_GPL(page_cgroup_ino);
294 
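/*
 * Usage sketch (hypothetical, not part of this file): a best-effort reader,
 * e.g. a procfs-style dumper, could report the memcg inode like this;
 * example_show_page() is an invented name.
 *
 *	static void example_show_page(struct seq_file *m, struct page *page)
 *	{
 *		ino_t ino = page_cgroup_ino(page);
 *
 *		if (ino)	// 0 means not charged to any cgroup
 *			seq_printf(m, "memcg_ino: %lu\n", (unsigned long)ino);
 *	}
 */
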
295 /* Subset of node_stat_item for memcg stats */
296 static const unsigned int memcg_node_stat_items[] = {
297 	NR_INACTIVE_ANON,
298 	NR_ACTIVE_ANON,
299 	NR_INACTIVE_FILE,
300 	NR_ACTIVE_FILE,
301 	NR_UNEVICTABLE,
302 	NR_SLAB_RECLAIMABLE_B,
303 	NR_SLAB_UNRECLAIMABLE_B,
304 	WORKINGSET_REFAULT_ANON,
305 	WORKINGSET_REFAULT_FILE,
306 	WORKINGSET_ACTIVATE_ANON,
307 	WORKINGSET_ACTIVATE_FILE,
308 	WORKINGSET_RESTORE_ANON,
309 	WORKINGSET_RESTORE_FILE,
310 	WORKINGSET_NODERECLAIM,
311 	NR_ANON_MAPPED,
312 	NR_FILE_MAPPED,
313 	NR_FILE_PAGES,
314 	NR_FILE_DIRTY,
315 	NR_WRITEBACK,
316 	NR_SHMEM,
317 	NR_SHMEM_THPS,
318 	NR_FILE_THPS,
319 	NR_ANON_THPS,
320 	NR_VMALLOC,
321 	NR_KERNEL_STACK_KB,
322 	NR_PAGETABLE,
323 	NR_SECONDARY_PAGETABLE,
324 #ifdef CONFIG_SWAP
325 	NR_SWAPCACHE,
326 #endif
327 #ifdef CONFIG_NUMA_BALANCING
328 	PGPROMOTE_SUCCESS,
329 #endif
330 	PGDEMOTE_KSWAPD,
331 	PGDEMOTE_DIRECT,
332 	PGDEMOTE_KHUGEPAGED,
333 	PGDEMOTE_PROACTIVE,
334 	PGSTEAL_KSWAPD,
335 	PGSTEAL_DIRECT,
336 	PGSTEAL_KHUGEPAGED,
337 	PGSTEAL_PROACTIVE,
338 	PGSTEAL_ANON,
339 	PGSTEAL_FILE,
340 	PGSCAN_KSWAPD,
341 	PGSCAN_DIRECT,
342 	PGSCAN_KHUGEPAGED,
343 	PGSCAN_PROACTIVE,
344 	PGSCAN_ANON,
345 	PGSCAN_FILE,
346 	PGREFILL,
347 #ifdef CONFIG_HUGETLB_PAGE
348 	NR_HUGETLB,
349 #endif
350 };
351 
352 static const unsigned int memcg_stat_items[] = {
353 	MEMCG_SWAP,
354 	MEMCG_SOCK,
355 	MEMCG_PERCPU_B,
356 	MEMCG_KMEM,
357 	MEMCG_ZSWAP_B,
358 	MEMCG_ZSWAPPED,
359 	MEMCG_ZSWAP_INCOMP,
360 };
361 
362 #define NR_MEMCG_NODE_STAT_ITEMS ARRAY_SIZE(memcg_node_stat_items)
363 #define MEMCG_VMSTAT_SIZE (NR_MEMCG_NODE_STAT_ITEMS + \
364 			   ARRAY_SIZE(memcg_stat_items))
365 #define BAD_STAT_IDX(index) ((u32)(index) >= U8_MAX)
366 static u8 mem_cgroup_stats_index[MEMCG_NR_STAT] __read_mostly;
367 
368 static void init_memcg_stats(void)
369 {
370 	u8 i, j = 0;
371 
372 	BUILD_BUG_ON(MEMCG_NR_STAT >= U8_MAX);
373 
374 	memset(mem_cgroup_stats_index, U8_MAX, sizeof(mem_cgroup_stats_index));
375 
376 	for (i = 0; i < NR_MEMCG_NODE_STAT_ITEMS; ++i, ++j)
377 		mem_cgroup_stats_index[memcg_node_stat_items[i]] = j;
378 
379 	for (i = 0; i < ARRAY_SIZE(memcg_stat_items); ++i, ++j)
380 		mem_cgroup_stats_index[memcg_stat_items[i]] = j;
381 }
382 
383 static inline int memcg_stats_index(int idx)
384 {
385 	return mem_cgroup_stats_index[idx];
386 }
387 
388 struct lruvec_stats_percpu {
389 	/* Local (CPU and cgroup) state */
390 	long state[NR_MEMCG_NODE_STAT_ITEMS];
391 
392 	/* Delta calculation for lockless upward propagation */
393 	long state_prev[NR_MEMCG_NODE_STAT_ITEMS];
394 };
395 
396 struct lruvec_stats {
397 	/* Aggregated (CPU and subtree) state */
398 	long state[NR_MEMCG_NODE_STAT_ITEMS];
399 
400 	/* Non-hierarchical (CPU aggregated) state */
401 	long state_local[NR_MEMCG_NODE_STAT_ITEMS];
402 
403 	/* Pending child counts during tree propagation */
404 	long state_pending[NR_MEMCG_NODE_STAT_ITEMS];
405 };
406 
407 unsigned long lruvec_page_state(struct lruvec *lruvec, enum node_stat_item idx)
408 {
409 	struct mem_cgroup_per_node *pn;
410 	long x;
411 	int i;
412 
413 	if (mem_cgroup_disabled())
414 		return node_page_state(lruvec_pgdat(lruvec), idx);
415 
416 	i = memcg_stats_index(idx);
417 	if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx))
418 		return 0;
419 
420 	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
421 	x = READ_ONCE(pn->lruvec_stats->state[i]);
422 #ifdef CONFIG_SMP
423 	if (x < 0)
424 		x = 0;
425 #endif
426 	return x;
427 }
428 
429 unsigned long lruvec_page_state_local(struct lruvec *lruvec,
430 				      enum node_stat_item idx)
431 {
432 	struct mem_cgroup_per_node *pn;
433 	long x;
434 	int i;
435 
436 	if (mem_cgroup_disabled())
437 		return node_page_state(lruvec_pgdat(lruvec), idx);
438 
439 	i = memcg_stats_index(idx);
440 	if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx))
441 		return 0;
442 
443 	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
444 	x = READ_ONCE(pn->lruvec_stats->state_local[i]);
445 #ifdef CONFIG_SMP
446 	if (x < 0)
447 		x = 0;
448 #endif
449 	return x;
450 }
451 
452 /* Subset of vm_event_item to report for memcg event stats */
453 static const unsigned int memcg_vm_event_stat[] = {
454 #ifdef CONFIG_MEMCG_V1
455 	PGPGIN,
456 	PGPGOUT,
457 #endif
458 	PSWPIN,
459 	PSWPOUT,
460 	PGFAULT,
461 	PGMAJFAULT,
462 	PGACTIVATE,
463 	PGDEACTIVATE,
464 	PGLAZYFREE,
465 	PGLAZYFREED,
466 #ifdef CONFIG_SWAP
467 	SWPIN_ZERO,
468 	SWPOUT_ZERO,
469 #endif
470 #ifdef CONFIG_ZSWAP
471 	ZSWPIN,
472 	ZSWPOUT,
473 	ZSWPWB,
474 #endif
475 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
476 	THP_FAULT_ALLOC,
477 	THP_COLLAPSE_ALLOC,
478 	THP_SWPOUT,
479 	THP_SWPOUT_FALLBACK,
480 #endif
481 #ifdef CONFIG_NUMA_BALANCING
482 	NUMA_PAGE_MIGRATE,
483 	NUMA_PTE_UPDATES,
484 	NUMA_HINT_FAULTS,
485 #endif
486 };
487 
488 #define NR_MEMCG_EVENTS ARRAY_SIZE(memcg_vm_event_stat)
489 static u8 mem_cgroup_events_index[NR_VM_EVENT_ITEMS] __read_mostly;
490 
491 static void init_memcg_events(void)
492 {
493 	u8 i;
494 
495 	BUILD_BUG_ON(NR_VM_EVENT_ITEMS >= U8_MAX);
496 
497 	memset(mem_cgroup_events_index, U8_MAX,
498 	       sizeof(mem_cgroup_events_index));
499 
500 	for (i = 0; i < NR_MEMCG_EVENTS; ++i)
501 		mem_cgroup_events_index[memcg_vm_event_stat[i]] = i;
502 }
503 
504 static inline int memcg_events_index(enum vm_event_item idx)
505 {
506 	return mem_cgroup_events_index[idx];
507 }
508 
509 struct memcg_vmstats_percpu {
510 	/* Stats updates since the last flush */
511 	unsigned int			stats_updates;
512 
513 	/* Cached pointers for fast iteration in memcg_rstat_updated() */
514 	struct memcg_vmstats_percpu __percpu	*parent_pcpu;
515 	struct memcg_vmstats			*vmstats;
516 
517 	/* The above should fit a single cacheline for memcg_rstat_updated() */
518 
519 	/* Local (CPU and cgroup) page state & events */
520 	long			state[MEMCG_VMSTAT_SIZE];
521 	unsigned long		events[NR_MEMCG_EVENTS];
522 
523 	/* Delta calculation for lockless upward propagation */
524 	long			state_prev[MEMCG_VMSTAT_SIZE];
525 	unsigned long		events_prev[NR_MEMCG_EVENTS];
526 } ____cacheline_aligned;
527 
528 struct memcg_vmstats {
529 	/* Aggregated (CPU and subtree) page state & events */
530 	long			state[MEMCG_VMSTAT_SIZE];
531 	unsigned long		events[NR_MEMCG_EVENTS];
532 
533 	/* Non-hierarchical (CPU aggregated) page state & events */
534 	long			state_local[MEMCG_VMSTAT_SIZE];
535 	unsigned long		events_local[NR_MEMCG_EVENTS];
536 
537 	/* Pending child counts during tree propagation */
538 	long			state_pending[MEMCG_VMSTAT_SIZE];
539 	unsigned long		events_pending[NR_MEMCG_EVENTS];
540 
541 	/* Stats updates since the last flush */
542 	atomic_t		stats_updates;
543 };
544 
545 /*
546  * memcg and lruvec stats flushing
547  *
548  * Many codepaths leading to stats updates or reads are performance sensitive,
549  * and adding stats flushing in such codepaths is not desirable. So, to optimize
550  * flushing, the kernel does the following:
551  *
552  * 1) Periodically and asynchronously flush the stats every 2 seconds so that
553  *    the rstat update tree does not grow unbounded.
554  *
555  * 2) Flush the stats synchronously on the reader side only when there are more
556  *    than (MEMCG_CHARGE_BATCH * nr_cpus) update events. This lets the stats be
557  *    out of sync by at most (MEMCG_CHARGE_BATCH * nr_cpus), but only for 2
558  *    seconds due to (1).
559  */
560 static void flush_memcg_stats_dwork(struct work_struct *w);
561 static DECLARE_DEFERRABLE_WORK(stats_flush_dwork, flush_memcg_stats_dwork);
562 static u64 flush_last_time;
563 
564 #define FLUSH_TIME (2UL*HZ)
565 
566 static bool memcg_vmstats_needs_flush(struct memcg_vmstats *vmstats)
567 {
568 	return atomic_read(&vmstats->stats_updates) >
569 		MEMCG_CHARGE_BATCH * num_online_cpus();
570 }
571 
572 static inline void memcg_rstat_updated(struct mem_cgroup *memcg, int val,
573 				       int cpu)
574 {
575 	struct memcg_vmstats_percpu __percpu *statc_pcpu;
576 	struct memcg_vmstats_percpu *statc;
577 	unsigned int stats_updates;
578 
579 	if (!val)
580 		return;
581 
582 	css_rstat_updated(&memcg->css, cpu);
583 	statc_pcpu = memcg->vmstats_percpu;
584 	for (; statc_pcpu; statc_pcpu = statc->parent_pcpu) {
585 		statc = this_cpu_ptr(statc_pcpu);
586 		/*
587 		 * If @memcg is already flushable then all its ancestors are
588 		 * flushable as well, and there is no need to increase
589 		 * stats_updates any further.
590 		 */
591 		if (memcg_vmstats_needs_flush(statc->vmstats))
592 			break;
593 
594 		stats_updates = this_cpu_add_return(statc_pcpu->stats_updates,
595 						    abs(val));
596 		if (stats_updates < MEMCG_CHARGE_BATCH)
597 			continue;
598 
599 		stats_updates = this_cpu_xchg(statc_pcpu->stats_updates, 0);
600 		atomic_add(stats_updates, &statc->vmstats->stats_updates);
601 	}
602 }
603 
604 static void __mem_cgroup_flush_stats(struct mem_cgroup *memcg, bool force)
605 {
606 	bool needs_flush = memcg_vmstats_needs_flush(memcg->vmstats);
607 
608 	trace_memcg_flush_stats(memcg, atomic_read(&memcg->vmstats->stats_updates),
609 		force, needs_flush);
610 
611 	if (!force && !needs_flush)
612 		return;
613 
614 	if (mem_cgroup_is_root(memcg))
615 		WRITE_ONCE(flush_last_time, jiffies_64);
616 
617 	css_rstat_flush(&memcg->css);
618 }
619 
620 /*
621  * mem_cgroup_flush_stats - flush the stats of a memory cgroup subtree
622  * @memcg: root of the subtree to flush
623  *
624  * Flushing is serialized by the underlying global rstat lock. There is also a
625  * minimum amount of work to be done even if there are no stat updates to flush.
626  * Hence, we only flush the stats if the updates delta exceeds a threshold. This
627  * avoids unnecessary work and contention on the underlying lock.
628  */
629 void mem_cgroup_flush_stats(struct mem_cgroup *memcg)
630 {
631 	if (mem_cgroup_disabled())
632 		return;
633 
634 	if (!memcg)
635 		memcg = root_mem_cgroup;
636 
637 	__mem_cgroup_flush_stats(memcg, false);
638 }
639 
640 void mem_cgroup_flush_stats_ratelimited(struct mem_cgroup *memcg)
641 {
642 	/* Only flush if the periodic flusher is one full cycle late */
643 	if (time_after64(jiffies_64, READ_ONCE(flush_last_time) + 2*FLUSH_TIME))
644 		mem_cgroup_flush_stats(memcg);
645 }
646 
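/*
 * Usage sketch (hypothetical, not part of this file): a reader that wants
 * reasonably fresh numbers flushes the subtree first and then reads; the
 * helper name and the MEMCG_SOCK item are arbitrary choices.
 *
 *	static unsigned long example_sock_pages(struct mem_cgroup *memcg)
 *	{
 *		mem_cgroup_flush_stats(memcg);
 *		return memcg_page_state(memcg, MEMCG_SOCK);
 *	}
 */
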
647 static void flush_memcg_stats_dwork(struct work_struct *w)
648 {
649 	/*
650 	 * Deliberately ignore memcg_vmstats_needs_flush() here so that flushing
651 	 * in latency-sensitive paths is as cheap as possible.
652 	 */
653 	__mem_cgroup_flush_stats(root_mem_cgroup, true);
654 	queue_delayed_work(system_dfl_wq, &stats_flush_dwork, FLUSH_TIME);
655 }
656 
657 unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
658 {
659 	long x;
660 	int i = memcg_stats_index(idx);
661 
662 	if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx))
663 		return 0;
664 
665 	x = READ_ONCE(memcg->vmstats->state[i]);
666 #ifdef CONFIG_SMP
667 	if (x < 0)
668 		x = 0;
669 #endif
670 	return x;
671 }
672 
673 bool memcg_stat_item_valid(int idx)
674 {
675 	if ((u32)idx >= MEMCG_NR_STAT)
676 		return false;
677 
678 	return !BAD_STAT_IDX(memcg_stats_index(idx));
679 }
680 
681 static int memcg_page_state_unit(int item);
682 
683 /*
684  * Normalize the value passed into memcg_rstat_updated() to be in pages. Round
685  * up non-zero sub-page updates to 1 page as zero page updates are ignored.
686  */
687 static int memcg_state_val_in_pages(int idx, int val)
688 {
689 	int unit = memcg_page_state_unit(idx);
690 
691 	if (!val || unit == PAGE_SIZE)
692 		return val;
693 	else
694 		return max(val * unit / PAGE_SIZE, 1UL);
695 }
696 
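/*
 * Worked example (assuming a 4K PAGE_SIZE): a +512 byte update to
 * NR_SLAB_RECLAIMABLE_B (unit 1 byte) becomes max(512 / 4096, 1) == 1 page,
 * while a +16 update to NR_KERNEL_STACK_KB (unit SZ_1K) becomes
 * 16 * 1024 / 4096 == 4 pages.
 */
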
697 /**
698  * mod_memcg_state - update cgroup memory statistics
699  * @memcg: the memory cgroup
700  * @idx: the stat item - can be enum memcg_stat_item or enum node_stat_item
701  * @val: delta to add to the counter, can be negative
702  */
703 void mod_memcg_state(struct mem_cgroup *memcg, enum memcg_stat_item idx,
704 		       int val)
705 {
706 	int i = memcg_stats_index(idx);
707 	int cpu;
708 
709 	if (mem_cgroup_disabled())
710 		return;
711 
712 	if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx))
713 		return;
714 
715 	cpu = get_cpu();
716 
717 	this_cpu_add(memcg->vmstats_percpu->state[i], val);
718 	val = memcg_state_val_in_pages(idx, val);
719 	memcg_rstat_updated(memcg, val, cpu);
720 	trace_mod_memcg_state(memcg, idx, val);
721 
722 	put_cpu();
723 }
724 
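/*
 * Usage sketch (hypothetical, not part of this file): charging one page of
 * socket memory to a memcg; the wrapper name is invented.
 *
 *	static void example_account_sock_page(struct mem_cgroup *memcg)
 *	{
 *		mod_memcg_state(memcg, MEMCG_SOCK, 1);
 *	}
 */
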
725 #ifdef CONFIG_MEMCG_V1
726 /* idx can be of type enum memcg_stat_item or node_stat_item. */
727 unsigned long memcg_page_state_local(struct mem_cgroup *memcg, int idx)
728 {
729 	long x;
730 	int i = memcg_stats_index(idx);
731 
732 	if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx))
733 		return 0;
734 
735 	x = READ_ONCE(memcg->vmstats->state_local[i]);
736 #ifdef CONFIG_SMP
737 	if (x < 0)
738 		x = 0;
739 #endif
740 	return x;
741 }
742 #endif
743 
744 static void mod_memcg_lruvec_state(struct lruvec *lruvec,
745 				     enum node_stat_item idx,
746 				     int val)
747 {
748 	struct mem_cgroup_per_node *pn;
749 	struct mem_cgroup *memcg;
750 	int i = memcg_stats_index(idx);
751 	int cpu;
752 
753 	if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx))
754 		return;
755 
756 	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
757 	memcg = pn->memcg;
758 
759 	cpu = get_cpu();
760 
761 	/* Update memcg */
762 	this_cpu_add(memcg->vmstats_percpu->state[i], val);
763 
764 	/* Update lruvec */
765 	this_cpu_add(pn->lruvec_stats_percpu->state[i], val);
766 
767 	val = memcg_state_val_in_pages(idx, val);
768 	memcg_rstat_updated(memcg, val, cpu);
769 	trace_mod_memcg_lruvec_state(memcg, idx, val);
770 
771 	put_cpu();
772 }
773 
774 /**
775  * mod_lruvec_state - update lruvec memory statistics
776  * @lruvec: the lruvec
777  * @idx: the stat item
778  * @val: delta to add to the counter, can be negative
779  *
780  * The lruvec is the intersection of the NUMA node and a cgroup. This
781  * function updates all three counters that are affected by a
782  * change of state at this level: per-node, per-cgroup, per-lruvec.
783  */
784 void mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
785 			int val)
786 {
787 	/* Update node */
788 	mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
789 
790 	/* Update memcg and lruvec */
791 	if (!mem_cgroup_disabled())
792 		mod_memcg_lruvec_state(lruvec, idx, val);
793 }
794 
795 void lruvec_stat_mod_folio(struct folio *folio, enum node_stat_item idx,
796 			     int val)
797 {
798 	struct mem_cgroup *memcg;
799 	pg_data_t *pgdat = folio_pgdat(folio);
800 	struct lruvec *lruvec;
801 
802 	rcu_read_lock();
803 	memcg = folio_memcg(folio);
804 	/* Untracked pages have no memcg, no lruvec. Update only the node */
805 	if (!memcg) {
806 		rcu_read_unlock();
807 		mod_node_page_state(pgdat, idx, val);
808 		return;
809 	}
810 
811 	lruvec = mem_cgroup_lruvec(memcg, pgdat);
812 	mod_lruvec_state(lruvec, idx, val);
813 	rcu_read_unlock();
814 }
815 EXPORT_SYMBOL(lruvec_stat_mod_folio);
816 
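/*
 * Usage sketch (hypothetical, not part of this file): accounting a folio
 * that just became dirty, in units of pages; the helper name is invented.
 *
 *	static void example_folio_dirtied(struct folio *folio)
 *	{
 *		lruvec_stat_mod_folio(folio, NR_FILE_DIRTY,
 *				      folio_nr_pages(folio));
 *	}
 */
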
817 void mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val)
818 {
819 	pg_data_t *pgdat = page_pgdat(virt_to_page(p));
820 	struct mem_cgroup *memcg;
821 	struct lruvec *lruvec;
822 
823 	rcu_read_lock();
824 	memcg = mem_cgroup_from_virt(p);
825 
826 	/*
827 	 * Untracked pages have no memcg and no lruvec, so update only the
828 	 * node. If the slab objects were reparented to the root memcg, then
829 	 * when we free such an object we still need to update the per-memcg
830 	 * vmstats to keep them correct for the root memcg.
831 	 */
832 	if (!memcg) {
833 		mod_node_page_state(pgdat, idx, val);
834 	} else {
835 		lruvec = mem_cgroup_lruvec(memcg, pgdat);
836 		mod_lruvec_state(lruvec, idx, val);
837 	}
838 	rcu_read_unlock();
839 }
840 
841 /**
842  * count_memcg_events - account VM events in a cgroup
843  * @memcg: the memory cgroup
844  * @idx: the event item
845  * @count: the number of events that occurred
846  */
847 void count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
848 			  unsigned long count)
849 {
850 	int i = memcg_events_index(idx);
851 	int cpu;
852 
853 	if (mem_cgroup_disabled())
854 		return;
855 
856 	if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx))
857 		return;
858 
859 	cpu = get_cpu();
860 
861 	this_cpu_add(memcg->vmstats_percpu->events[i], count);
862 	memcg_rstat_updated(memcg, count, cpu);
863 	trace_count_memcg_events(memcg, idx, count);
864 
865 	put_cpu();
866 }
867 
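/*
 * Usage sketch (hypothetical, not part of this file): recording one major
 * fault against a memcg; the helper name is invented.
 *
 *	static void example_note_majfault(struct mem_cgroup *memcg)
 *	{
 *		count_memcg_events(memcg, PGMAJFAULT, 1);
 *	}
 */
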
868 unsigned long memcg_events(struct mem_cgroup *memcg, int event)
869 {
870 	int i = memcg_events_index(event);
871 
872 	if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, event))
873 		return 0;
874 
875 	return READ_ONCE(memcg->vmstats->events[i]);
876 }
877 
878 bool memcg_vm_event_item_valid(enum vm_event_item idx)
879 {
880 	if (idx >= NR_VM_EVENT_ITEMS)
881 		return false;
882 
883 	return !BAD_STAT_IDX(memcg_events_index(idx));
884 }
885 
886 #ifdef CONFIG_MEMCG_V1
887 unsigned long memcg_events_local(struct mem_cgroup *memcg, int event)
888 {
889 	int i = memcg_events_index(event);
890 
891 	if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, event))
892 		return 0;
893 
894 	return READ_ONCE(memcg->vmstats->events_local[i]);
895 }
896 #endif
897 
898 struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
899 {
900 	/*
901 	 * mm_update_next_owner() may clear mm->owner to NULL
902 	 * if it races with swapoff, page migration, etc.
903 	 * So this can be called with p == NULL.
904 	 */
905 	if (unlikely(!p))
906 		return NULL;
907 
908 	return mem_cgroup_from_css(task_css(p, memory_cgrp_id));
909 }
910 EXPORT_SYMBOL(mem_cgroup_from_task);
911 
912 static __always_inline struct mem_cgroup *active_memcg(void)
913 {
914 	if (!in_task())
915 		return this_cpu_read(int_active_memcg);
916 	else
917 		return current->active_memcg;
918 }
919 
920 /**
921  * get_mem_cgroup_from_mm: Obtain a reference on given mm_struct's memcg.
922  * @mm: mm from which memcg should be extracted. It can be NULL.
923  * Obtain a reference on the memcg associated with @mm, and return it if
924  * successful. If @mm is NULL, the memcg is chosen as follows:
925  * 1) The active memcg, if set.
926  * 2) current->mm's memcg, if available.
927  * 3) The root memcg.
928  * 3) root memcg
929  * If mem_cgroup is disabled, NULL is returned.
930  */
931 struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
932 {
933 	struct mem_cgroup *memcg;
934 
935 	if (mem_cgroup_disabled())
936 		return NULL;
937 
938 	/*
939 	 * Page cache insertions can happen without an
940 	 * actual mm context, e.g. during disk probing
941 	 * on boot, loopback IO, acct() writes etc.
942 	 *
943 	 * No need to css_get on root memcg as the reference
944 	 * counting is disabled on the root level in the
945 	 * cgroup core. See CSS_NO_REF.
946 	 */
947 	if (unlikely(!mm)) {
948 		memcg = active_memcg();
949 		if (unlikely(memcg)) {
950 			/* remote memcg must hold a ref */
951 			css_get(&memcg->css);
952 			return memcg;
953 		}
954 		mm = current->mm;
955 		if (unlikely(!mm))
956 			return root_mem_cgroup;
957 	}
958 
959 	rcu_read_lock();
960 	do {
961 		memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
962 		if (unlikely(!memcg))
963 			memcg = root_mem_cgroup;
964 	} while (!css_tryget(&memcg->css));
965 	rcu_read_unlock();
966 	return memcg;
967 }
968 EXPORT_SYMBOL(get_mem_cgroup_from_mm);
969 
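/*
 * Usage sketch (hypothetical, not part of this file): the reference
 * obtained here must be dropped again, e.g. with css_put().
 *
 *	struct mem_cgroup *memcg = get_mem_cgroup_from_mm(mm);
 *
 *	if (memcg) {	// NULL only when the controller is disabled
 *		// ... charge against or inspect the memcg ...
 *		css_put(&memcg->css);
 *	}
 */
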
970 /**
971  * get_mem_cgroup_from_current - Obtain a reference on current task's memcg.
972  */
973 struct mem_cgroup *get_mem_cgroup_from_current(void)
974 {
975 	struct mem_cgroup *memcg;
976 
977 	if (mem_cgroup_disabled())
978 		return NULL;
979 
980 again:
981 	rcu_read_lock();
982 	memcg = mem_cgroup_from_task(current);
983 	if (!css_tryget(&memcg->css)) {
984 		rcu_read_unlock();
985 		goto again;
986 	}
987 	rcu_read_unlock();
988 	return memcg;
989 }
990 
991 /**
992  * get_mem_cgroup_from_folio - Obtain a reference on a given folio's memcg.
993  * @folio: folio from which memcg should be extracted.
994  */
995 struct mem_cgroup *get_mem_cgroup_from_folio(struct folio *folio)
996 {
997 	struct mem_cgroup *memcg = folio_memcg(folio);
998 
999 	if (mem_cgroup_disabled())
1000 		return NULL;
1001 
1002 	rcu_read_lock();
1003 	if (!memcg || WARN_ON_ONCE(!css_tryget(&memcg->css)))
1004 		memcg = root_mem_cgroup;
1005 	rcu_read_unlock();
1006 	return memcg;
1007 }
1008 
1009 /**
1010  * mem_cgroup_iter - iterate over memory cgroup hierarchy
1011  * @root: hierarchy root
1012  * @prev: previously returned memcg, NULL on first invocation
1013  * @reclaim: cookie for shared reclaim walks, NULL for full walks
1014  *
1015  * Returns references to children of the hierarchy below @root, or
1016  * @root itself, or %NULL after a full round-trip.
1017  *
1018  * Caller must pass the return value in @prev on subsequent
1019  * invocations for reference counting, or use mem_cgroup_iter_break()
1020  * to cancel a hierarchy walk before the round-trip is complete.
1021  *
1022  * Reclaimers can specify a node in @reclaim to divide up the memcgs
1023  * in the hierarchy among all concurrent reclaimers operating on the
1024  * same node.
1025  */
1026 struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
1027 				   struct mem_cgroup *prev,
1028 				   struct mem_cgroup_reclaim_cookie *reclaim)
1029 {
1030 	struct mem_cgroup_reclaim_iter *iter;
1031 	struct cgroup_subsys_state *css;
1032 	struct mem_cgroup *pos;
1033 	struct mem_cgroup *next;
1034 
1035 	if (mem_cgroup_disabled())
1036 		return NULL;
1037 
1038 	if (!root)
1039 		root = root_mem_cgroup;
1040 
1041 	rcu_read_lock();
1042 restart:
1043 	next = NULL;
1044 
1045 	if (reclaim) {
1046 		int gen;
1047 		int nid = reclaim->pgdat->node_id;
1048 
1049 		iter = &root->nodeinfo[nid]->iter;
1050 		gen = atomic_read(&iter->generation);
1051 
1052 		/*
1053 		 * On start, join the current reclaim iteration cycle.
1054 		 * Exit when a concurrent walker completes it.
1055 		 */
1056 		if (!prev)
1057 			reclaim->generation = gen;
1058 		else if (reclaim->generation != gen)
1059 			goto out_unlock;
1060 
1061 		pos = READ_ONCE(iter->position);
1062 	} else
1063 		pos = prev;
1064 
1065 	css = pos ? &pos->css : NULL;
1066 
1067 	while ((css = css_next_descendant_pre(css, &root->css))) {
1068 		/*
1069 		 * Verify the css and acquire a reference.  The root
1070 		 * is provided by the caller, so we know it's alive
1071 		 * and kicking, and don't take an extra reference.
1072 		 */
1073 		if (css == &root->css || css_tryget(css))
1074 			break;
1075 	}
1076 
1077 	next = mem_cgroup_from_css(css);
1078 
1079 	if (reclaim) {
1080 		/*
1081 		 * The position could have already been updated by a competing
1082 		 * thread, so check that the value hasn't changed since we read
1083 		 * it to avoid reclaiming from the same cgroup twice.
1084 		 */
1085 		if (cmpxchg(&iter->position, pos, next) != pos) {
1086 			if (css && css != &root->css)
1087 				css_put(css);
1088 			goto restart;
1089 		}
1090 
1091 		if (!next) {
1092 			atomic_inc(&iter->generation);
1093 
1094 			/*
1095 			 * Reclaimers share the hierarchy walk, and a
1096 			 * new one might jump in right at the end of
1097 			 * the hierarchy - make sure they see at least
1098 			 * one group and restart from the beginning.
1099 			 */
1100 			if (!prev)
1101 				goto restart;
1102 		}
1103 	}
1104 
1105 out_unlock:
1106 	rcu_read_unlock();
1107 	if (prev && prev != root)
1108 		css_put(&prev->css);
1109 
1110 	return next;
1111 }
1112 
1113 /**
1114  * mem_cgroup_iter_break - abort a hierarchy walk prematurely
1115  * @root: hierarchy root
1116  * @prev: last visited hierarchy member as returned by mem_cgroup_iter()
1117  */
1118 void mem_cgroup_iter_break(struct mem_cgroup *root,
1119 			   struct mem_cgroup *prev)
1120 {
1121 	if (!root)
1122 		root = root_mem_cgroup;
1123 	if (prev && prev != root)
1124 		css_put(&prev->css);
1125 }
1126 
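/*
 * Usage sketch (hypothetical, not part of this file): the usual iteration
 * pattern, including early termination with mem_cgroup_iter_break();
 * want_to_stop() is an invented predicate.
 *
 *	struct mem_cgroup *iter = NULL;
 *
 *	while ((iter = mem_cgroup_iter(root, iter, NULL))) {
 *		if (want_to_stop(iter)) {
 *			mem_cgroup_iter_break(root, iter);
 *			break;
 *		}
 *	}
 */
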
1127 static void __invalidate_reclaim_iterators(struct mem_cgroup *from,
1128 					struct mem_cgroup *dead_memcg)
1129 {
1130 	struct mem_cgroup_reclaim_iter *iter;
1131 	struct mem_cgroup_per_node *mz;
1132 	int nid;
1133 
1134 	for_each_node(nid) {
1135 		mz = from->nodeinfo[nid];
1136 		iter = &mz->iter;
1137 		cmpxchg(&iter->position, dead_memcg, NULL);
1138 	}
1139 }
1140 
1141 static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
1142 {
1143 	struct mem_cgroup *memcg = dead_memcg;
1144 	struct mem_cgroup *last;
1145 
1146 	do {
1147 		__invalidate_reclaim_iterators(memcg, dead_memcg);
1148 		last = memcg;
1149 	} while ((memcg = parent_mem_cgroup(memcg)));
1150 
1151 	/*
1152 	 * When cgroup1 non-hierarchy mode is used,
1153 	 * parent_mem_cgroup() does not walk all the way up to the
1154 	 * cgroup root (root_mem_cgroup). So we have to handle
1155 	 * dead_memcg from cgroup root separately.
1156 	 */
1157 	if (!mem_cgroup_is_root(last))
1158 		__invalidate_reclaim_iterators(root_mem_cgroup,
1159 						dead_memcg);
1160 }
1161 
1162 /**
1163  * mem_cgroup_scan_tasks - iterate over tasks of a memory cgroup hierarchy
1164  * @memcg: hierarchy root
1165  * @fn: function to call for each task
1166  * @arg: argument passed to @fn
1167  *
1168  * This function iterates over tasks attached to @memcg or to any of its
1169  * descendants and calls @fn for each task. If @fn returns a non-zero
1170  * value, the function breaks the iteration loop. Otherwise, it will iterate
1171  * value, the function breaks the iteration loop. Otherwise, it will
1172  * iterate over all tasks.
1173  * This function must not be called for the root memory cgroup.
1174  */
1175 void mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
1176 			   int (*fn)(struct task_struct *, void *), void *arg)
1177 {
1178 	struct mem_cgroup *iter;
1179 	int ret = 0;
1180 
1181 	BUG_ON(mem_cgroup_is_root(memcg));
1182 
1183 	for_each_mem_cgroup_tree(iter, memcg) {
1184 		struct css_task_iter it;
1185 		struct task_struct *task;
1186 
1187 		css_task_iter_start(&iter->css, CSS_TASK_ITER_PROCS, &it);
1188 		while (!ret && (task = css_task_iter_next(&it))) {
1189 			ret = fn(task, arg);
1190 			/* Avoid potential softlockup warning */
1191 			cond_resched();
1192 		}
1193 		css_task_iter_end(&it);
1194 		if (ret) {
1195 			mem_cgroup_iter_break(memcg, iter);
1196 			break;
1197 		}
1198 	}
1199 }
1200 
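/*
 * Usage sketch (hypothetical, not part of this file): counting the tasks in
 * a subtree with a trivial callback; both names are invented, and @memcg
 * must not be the root memcg.
 *
 *	static int example_count_one(struct task_struct *task, void *arg)
 *	{
 *		(*(unsigned int *)arg)++;
 *		return 0;	// keep iterating
 *	}
 *
 *	static unsigned int example_count_tasks(struct mem_cgroup *memcg)
 *	{
 *		unsigned int nr = 0;
 *
 *		mem_cgroup_scan_tasks(memcg, example_count_one, &nr);
 *		return nr;
 *	}
 */
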
1201 #ifdef CONFIG_DEBUG_VM
1202 void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio)
1203 {
1204 	struct mem_cgroup *memcg;
1205 
1206 	if (mem_cgroup_disabled())
1207 		return;
1208 
1209 	memcg = folio_memcg(folio);
1210 
1211 	if (!memcg)
1212 		VM_BUG_ON_FOLIO(!mem_cgroup_is_root(lruvec_memcg(lruvec)), folio);
1213 	else
1214 		VM_BUG_ON_FOLIO(lruvec_memcg(lruvec) != memcg, folio);
1215 }
1216 #endif
1217 
1218 /**
1219  * folio_lruvec_lock - Lock the lruvec for a folio.
1220  * @folio: Pointer to the folio.
1221  *
1222  * These functions are safe to use under any of the following conditions:
1223  * - folio locked
1224  * - folio_test_lru false
1225  * - folio frozen (refcount of 0)
1226  *
1227  * Return: The lruvec this folio is on with its lock held.
1228  */
1229 struct lruvec *folio_lruvec_lock(struct folio *folio)
1230 {
1231 	struct lruvec *lruvec = folio_lruvec(folio);
1232 
1233 	spin_lock(&lruvec->lru_lock);
1234 	lruvec_memcg_debug(lruvec, folio);
1235 
1236 	return lruvec;
1237 }
1238 
1239 /**
1240  * folio_lruvec_lock_irq - Lock the lruvec for a folio.
1241  * @folio: Pointer to the folio.
1242  *
1243  * These functions are safe to use under any of the following conditions:
1244  * - folio locked
1245  * - folio_test_lru false
1246  * - folio frozen (refcount of 0)
1247  *
1248  * Return: The lruvec this folio is on with its lock held and interrupts
1249  * disabled.
1250  */
1251 struct lruvec *folio_lruvec_lock_irq(struct folio *folio)
1252 {
1253 	struct lruvec *lruvec = folio_lruvec(folio);
1254 
1255 	spin_lock_irq(&lruvec->lru_lock);
1256 	lruvec_memcg_debug(lruvec, folio);
1257 
1258 	return lruvec;
1259 }
1260 
1261 /**
1262  * folio_lruvec_lock_irqsave - Lock the lruvec for a folio.
1263  * @folio: Pointer to the folio.
1264  * @flags: Pointer to irqsave flags.
1265  *
1266  * These functions are safe to use under any of the following conditions:
1267  * - folio locked
1268  * - folio_test_lru false
1269  * - folio frozen (refcount of 0)
1270  *
1271  * Return: The lruvec this folio is on with its lock held and interrupts
1272  * disabled.
1273  */
1274 struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio,
1275 		unsigned long *flags)
1276 {
1277 	struct lruvec *lruvec = folio_lruvec(folio);
1278 
1279 	spin_lock_irqsave(&lruvec->lru_lock, *flags);
1280 	lruvec_memcg_debug(lruvec, folio);
1281 
1282 	return lruvec;
1283 }
1284 
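/*
 * Usage sketch (hypothetical, not part of this file): the irqsave variant
 * pairs with an unlock of the returned lruvec's lru_lock that restores the
 * saved flags; the helper name is invented.
 *
 *	static void example_touch_lru(struct folio *folio)
 *	{
 *		unsigned long flags;
 *		struct lruvec *lruvec = folio_lruvec_lock_irqsave(folio, &flags);
 *
 *		// ... operate on the LRU lists here ...
 *		spin_unlock_irqrestore(&lruvec->lru_lock, flags);
 *	}
 */
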
1285 /**
1286  * mem_cgroup_update_lru_size - account for adding or removing an lru page
1287  * @lruvec: mem_cgroup per zone lru vector
1288  * @lru: index of lru list the page is sitting on
1289  * @zid: zone id of the accounted pages
1290  * @nr_pages: positive when adding or negative when removing
1291  *
1292  * This function must be called under lru_lock, just before a page is added
1293  * to or just after a page is removed from an lru list.
1294  */
1295 void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
1296 				int zid, int nr_pages)
1297 {
1298 	struct mem_cgroup_per_node *mz;
1299 	unsigned long *lru_size;
1300 	long size;
1301 
1302 	if (mem_cgroup_disabled())
1303 		return;
1304 
1305 	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
1306 	lru_size = &mz->lru_zone_size[zid][lru];
1307 
1308 	if (nr_pages < 0)
1309 		*lru_size += nr_pages;
1310 
1311 	size = *lru_size;
1312 	if (WARN_ONCE(size < 0,
1313 		"%s(%p, %d, %d): lru_size %ld\n",
1314 		__func__, lruvec, lru, nr_pages, size)) {
1315 		VM_BUG_ON(1);
1316 		*lru_size = 0;
1317 	}
1318 
1319 	if (nr_pages > 0)
1320 		*lru_size += nr_pages;
1321 }
1322 
1323 /**
1324  * mem_cgroup_margin - calculate chargeable space of a memory cgroup
1325  * @memcg: the memory cgroup
1326  *
1327  * Returns the maximum amount of memory @memcg can be charged with, in
1328  * pages.
1329  */
1330 static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
1331 {
1332 	unsigned long margin = 0;
1333 	unsigned long count;
1334 	unsigned long limit;
1335 
1336 	count = page_counter_read(&memcg->memory);
1337 	limit = READ_ONCE(memcg->memory.max);
1338 	if (count < limit)
1339 		margin = limit - count;
1340 
1341 	if (do_memsw_account()) {
1342 		count = page_counter_read(&memcg->memsw);
1343 		limit = READ_ONCE(memcg->memsw.max);
1344 		if (count < limit)
1345 			margin = min(margin, limit - count);
1346 		else
1347 			margin = 0;
1348 	}
1349 
1350 	return margin;
1351 }
1352 
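/*
 * Worked example: with memory.max == 1000 pages and usage 900, the margin
 * is 100 pages; if memsw accounting is enabled with memsw.max == 950 and
 * memsw usage 920, the result is min(100, 30) == 30 pages, and 0 if the
 * memsw usage already meets or exceeds its limit.
 */
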
1353 struct memory_stat {
1354 	const char *name;
1355 	unsigned int idx;
1356 };
1357 
1358 static const struct memory_stat memory_stats[] = {
1359 	{ "anon",			NR_ANON_MAPPED			},
1360 	{ "file",			NR_FILE_PAGES			},
1361 	{ "kernel",			MEMCG_KMEM			},
1362 	{ "kernel_stack",		NR_KERNEL_STACK_KB		},
1363 	{ "pagetables",			NR_PAGETABLE			},
1364 	{ "sec_pagetables",		NR_SECONDARY_PAGETABLE		},
1365 	{ "percpu",			MEMCG_PERCPU_B			},
1366 	{ "sock",			MEMCG_SOCK			},
1367 	{ "vmalloc",			NR_VMALLOC			},
1368 	{ "shmem",			NR_SHMEM			},
1369 #ifdef CONFIG_ZSWAP
1370 	{ "zswap",			MEMCG_ZSWAP_B			},
1371 	{ "zswapped",			MEMCG_ZSWAPPED			},
1372 	{ "zswap_incomp",		MEMCG_ZSWAP_INCOMP		},
1373 #endif
1374 	{ "file_mapped",		NR_FILE_MAPPED			},
1375 	{ "file_dirty",			NR_FILE_DIRTY			},
1376 	{ "file_writeback",		NR_WRITEBACK			},
1377 #ifdef CONFIG_SWAP
1378 	{ "swapcached",			NR_SWAPCACHE			},
1379 #endif
1380 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1381 	{ "anon_thp",			NR_ANON_THPS			},
1382 	{ "file_thp",			NR_FILE_THPS			},
1383 	{ "shmem_thp",			NR_SHMEM_THPS			},
1384 #endif
1385 	{ "inactive_anon",		NR_INACTIVE_ANON		},
1386 	{ "active_anon",		NR_ACTIVE_ANON			},
1387 	{ "inactive_file",		NR_INACTIVE_FILE		},
1388 	{ "active_file",		NR_ACTIVE_FILE			},
1389 	{ "unevictable",		NR_UNEVICTABLE			},
1390 	{ "slab_reclaimable",		NR_SLAB_RECLAIMABLE_B		},
1391 	{ "slab_unreclaimable",		NR_SLAB_UNRECLAIMABLE_B		},
1392 #ifdef CONFIG_HUGETLB_PAGE
1393 	{ "hugetlb",			NR_HUGETLB			},
1394 #endif
1395 
1396 	/* The memory events */
1397 	{ "workingset_refault_anon",	WORKINGSET_REFAULT_ANON		},
1398 	{ "workingset_refault_file",	WORKINGSET_REFAULT_FILE		},
1399 	{ "workingset_activate_anon",	WORKINGSET_ACTIVATE_ANON	},
1400 	{ "workingset_activate_file",	WORKINGSET_ACTIVATE_FILE	},
1401 	{ "workingset_restore_anon",	WORKINGSET_RESTORE_ANON		},
1402 	{ "workingset_restore_file",	WORKINGSET_RESTORE_FILE		},
1403 	{ "workingset_nodereclaim",	WORKINGSET_NODERECLAIM		},
1404 
1405 	{ "pgdemote_kswapd",		PGDEMOTE_KSWAPD		},
1406 	{ "pgdemote_direct",		PGDEMOTE_DIRECT		},
1407 	{ "pgdemote_khugepaged",	PGDEMOTE_KHUGEPAGED	},
1408 	{ "pgdemote_proactive",		PGDEMOTE_PROACTIVE	},
1409 	{ "pgsteal_kswapd",		PGSTEAL_KSWAPD		},
1410 	{ "pgsteal_direct",		PGSTEAL_DIRECT		},
1411 	{ "pgsteal_khugepaged",		PGSTEAL_KHUGEPAGED	},
1412 	{ "pgsteal_proactive",		PGSTEAL_PROACTIVE	},
1413 	{ "pgscan_kswapd",		PGSCAN_KSWAPD		},
1414 	{ "pgscan_direct",		PGSCAN_DIRECT		},
1415 	{ "pgscan_khugepaged",		PGSCAN_KHUGEPAGED	},
1416 	{ "pgscan_proactive",		PGSCAN_PROACTIVE	},
1417 	{ "pgrefill",			PGREFILL		},
1418 #ifdef CONFIG_NUMA_BALANCING
1419 	{ "pgpromote_success",		PGPROMOTE_SUCCESS	},
1420 #endif
1421 };
1422 
1423 /* The actual unit of the state item, not the same as the output unit */
1424 static int memcg_page_state_unit(int item)
1425 {
1426 	switch (item) {
1427 	case MEMCG_PERCPU_B:
1428 	case MEMCG_ZSWAP_B:
1429 	case NR_SLAB_RECLAIMABLE_B:
1430 	case NR_SLAB_UNRECLAIMABLE_B:
1431 		return 1;
1432 	case NR_KERNEL_STACK_KB:
1433 		return SZ_1K;
1434 	default:
1435 		return PAGE_SIZE;
1436 	}
1437 }
1438 
1439 /* Translate stat items to the correct unit for memory.stat output */
1440 static int memcg_page_state_output_unit(int item)
1441 {
1442 	/*
1443 	 * Workingset state is actually in pages, but we export it to userspace
1444 	 * as a scalar count of events, so special case it here.
1445 	 *
1446 	 * Demotion and promotion activities are exported in pages, consistent
1447 	 * with their global counterparts.
1448 	 */
1449 	switch (item) {
1450 	case WORKINGSET_REFAULT_ANON:
1451 	case WORKINGSET_REFAULT_FILE:
1452 	case WORKINGSET_ACTIVATE_ANON:
1453 	case WORKINGSET_ACTIVATE_FILE:
1454 	case WORKINGSET_RESTORE_ANON:
1455 	case WORKINGSET_RESTORE_FILE:
1456 	case WORKINGSET_NODERECLAIM:
1457 	case PGDEMOTE_KSWAPD:
1458 	case PGDEMOTE_DIRECT:
1459 	case PGDEMOTE_KHUGEPAGED:
1460 	case PGDEMOTE_PROACTIVE:
1461 	case PGSTEAL_KSWAPD:
1462 	case PGSTEAL_DIRECT:
1463 	case PGSTEAL_KHUGEPAGED:
1464 	case PGSTEAL_PROACTIVE:
1465 	case PGSCAN_KSWAPD:
1466 	case PGSCAN_DIRECT:
1467 	case PGSCAN_KHUGEPAGED:
1468 	case PGSCAN_PROACTIVE:
1469 	case PGREFILL:
1470 #ifdef CONFIG_NUMA_BALANCING
1471 	case PGPROMOTE_SUCCESS:
1472 #endif
1473 		return 1;
1474 	default:
1475 		return memcg_page_state_unit(item);
1476 	}
1477 }
1478 
1479 unsigned long memcg_page_state_output(struct mem_cgroup *memcg, int item)
1480 {
1481 	return memcg_page_state(memcg, item) *
1482 		memcg_page_state_output_unit(item);
1483 }
1484 
1485 #ifdef CONFIG_MEMCG_V1
1486 unsigned long memcg_page_state_local_output(struct mem_cgroup *memcg, int item)
1487 {
1488 	return memcg_page_state_local(memcg, item) *
1489 		memcg_page_state_output_unit(item);
1490 }
1491 #endif
1492 
1493 #ifdef CONFIG_HUGETLB_PAGE
1494 static bool memcg_accounts_hugetlb(void)
1495 {
1496 	return cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_HUGETLB_ACCOUNTING;
1497 }
1498 #else /* CONFIG_HUGETLB_PAGE */
1499 static bool memcg_accounts_hugetlb(void)
1500 {
1501 	return false;
1502 }
1503 #endif /* CONFIG_HUGETLB_PAGE */
1504 
1505 static void memcg_stat_format(struct mem_cgroup *memcg, struct seq_buf *s)
1506 {
1507 	int i;
1508 
1509 	/*
1510 	 * Provide statistics on the state of the memory subsystem as
1511 	 * well as cumulative event counters that show past behavior.
1512 	 *
1513 	 * This list is ordered following a combination of these gradients:
1514 	 * 1) generic big picture -> specifics and details
1515 	 * 2) reflecting userspace activity -> reflecting kernel heuristics
1516 	 *
1517 	 * Current memory state:
1518 	 */
1519 	mem_cgroup_flush_stats(memcg);
1520 
1521 	for (i = 0; i < ARRAY_SIZE(memory_stats); i++) {
1522 		u64 size;
1523 
1524 #ifdef CONFIG_HUGETLB_PAGE
1525 		if (unlikely(memory_stats[i].idx == NR_HUGETLB) &&
1526 			!memcg_accounts_hugetlb())
1527 			continue;
1528 #endif
1529 		size = memcg_page_state_output(memcg, memory_stats[i].idx);
1530 		seq_buf_printf(s, "%s %llu\n", memory_stats[i].name, size);
1531 
1532 		if (unlikely(memory_stats[i].idx == NR_SLAB_UNRECLAIMABLE_B)) {
1533 			size += memcg_page_state_output(memcg,
1534 							NR_SLAB_RECLAIMABLE_B);
1535 			seq_buf_printf(s, "slab %llu\n", size);
1536 		}
1537 	}
1538 
1539 	/* Accumulated memory events */
1540 	seq_buf_printf(s, "pgscan %lu\n",
1541 		       memcg_page_state(memcg, PGSCAN_KSWAPD) +
1542 		       memcg_page_state(memcg, PGSCAN_DIRECT) +
1543 		       memcg_page_state(memcg, PGSCAN_PROACTIVE) +
1544 		       memcg_page_state(memcg, PGSCAN_KHUGEPAGED));
1545 	seq_buf_printf(s, "pgsteal %lu\n",
1546 		       memcg_page_state(memcg, PGSTEAL_KSWAPD) +
1547 		       memcg_page_state(memcg, PGSTEAL_DIRECT) +
1548 		       memcg_page_state(memcg, PGSTEAL_PROACTIVE) +
1549 		       memcg_page_state(memcg, PGSTEAL_KHUGEPAGED));
1550 
1551 	for (i = 0; i < ARRAY_SIZE(memcg_vm_event_stat); i++) {
1552 #ifdef CONFIG_MEMCG_V1
1553 		if (memcg_vm_event_stat[i] == PGPGIN ||
1554 		    memcg_vm_event_stat[i] == PGPGOUT)
1555 			continue;
1556 #endif
1557 		seq_buf_printf(s, "%s %lu\n",
1558 			       vm_event_name(memcg_vm_event_stat[i]),
1559 			       memcg_events(memcg, memcg_vm_event_stat[i]));
1560 	}
1561 }
1562 
1563 static void memory_stat_format(struct mem_cgroup *memcg, struct seq_buf *s)
1564 {
1565 	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
1566 		memcg_stat_format(memcg, s);
1567 	else
1568 		memcg1_stat_format(memcg, s);
1569 	if (seq_buf_has_overflowed(s))
1570 		pr_warn("%s: Warning, stat buffer overflow, please report\n", __func__);
1571 }
1572 
1573 /**
1574  * mem_cgroup_print_oom_context: Print OOM information relevant to
1575  * memory controller.
1576  * @memcg: The memory cgroup that went over limit
1577  * @p: Task that is going to be killed
1578  *
1579  * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
1580  * enabled
1581  */
1582 void mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
1583 {
1584 	rcu_read_lock();
1585 
1586 	if (memcg) {
1587 		pr_cont(",oom_memcg=");
1588 		pr_cont_cgroup_path(memcg->css.cgroup);
1589 	} else
1590 		pr_cont(",global_oom");
1591 	if (p) {
1592 		pr_cont(",task_memcg=");
1593 		pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
1594 	}
1595 	rcu_read_unlock();
1596 }
1597 
1598 /**
1599  * mem_cgroup_print_oom_meminfo: Print OOM memory information relevant to
1600  * memory controller.
1601  * @memcg: The memory cgroup that went over limit
1602  */
1603 void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
1604 {
1605 	/* Use a static buffer, as the caller is holding oom_lock. */
1606 	static char buf[SEQ_BUF_SIZE];
1607 	struct seq_buf s;
1608 	unsigned long memory_failcnt;
1609 
1610 	lockdep_assert_held(&oom_lock);
1611 
1612 	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
1613 		memory_failcnt = atomic_long_read(&memcg->memory_events[MEMCG_MAX]);
1614 	else
1615 		memory_failcnt = memcg->memory.failcnt;
1616 
1617 	pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n",
1618 		K((u64)page_counter_read(&memcg->memory)),
1619 		K((u64)READ_ONCE(memcg->memory.max)), memory_failcnt);
1620 	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
1621 		pr_info("swap: usage %llukB, limit %llukB, failcnt %lu\n",
1622 			K((u64)page_counter_read(&memcg->swap)),
1623 			K((u64)READ_ONCE(memcg->swap.max)),
1624 			atomic_long_read(&memcg->memory_events[MEMCG_SWAP_MAX]));
1625 #ifdef CONFIG_MEMCG_V1
1626 	else {
1627 		pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %lu\n",
1628 			K((u64)page_counter_read(&memcg->memsw)),
1629 			K((u64)memcg->memsw.max), memcg->memsw.failcnt);
1630 		pr_info("kmem: usage %llukB, limit %llukB, failcnt %lu\n",
1631 			K((u64)page_counter_read(&memcg->kmem)),
1632 			K((u64)memcg->kmem.max), memcg->kmem.failcnt);
1633 	}
1634 #endif
1635 
1636 	pr_info("Memory cgroup stats for ");
1637 	pr_cont_cgroup_path(memcg->css.cgroup);
1638 	pr_cont(":");
1639 	seq_buf_init(&s, buf, SEQ_BUF_SIZE);
1640 	memory_stat_format(memcg, &s);
1641 	seq_buf_do_printk(&s, KERN_INFO);
1642 }
1643 
1644 /*
1645  * Return the memory (and swap, if configured) limit for a memcg.
1646  */
1647 unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
1648 {
1649 	unsigned long max = READ_ONCE(memcg->memory.max);
1650 
1651 	if (do_memsw_account()) {
1652 		if (mem_cgroup_swappiness(memcg)) {
1653 			/* Calculate swap excess capacity from memsw limit */
1654 			unsigned long swap = READ_ONCE(memcg->memsw.max) - max;
1655 
1656 			max += min(swap, (unsigned long)total_swap_pages);
1657 		}
1658 	} else {
1659 		if (mem_cgroup_swappiness(memcg))
1660 			max += min(READ_ONCE(memcg->swap.max),
1661 				   (unsigned long)total_swap_pages);
1662 	}
1663 	return max;
1664 }
1665 
1666 void __memcg_memory_event(struct mem_cgroup *memcg,
1667 			  enum memcg_memory_event event, bool allow_spinning)
1668 {
1669 	bool swap_event = event == MEMCG_SWAP_HIGH || event == MEMCG_SWAP_MAX ||
1670 			  event == MEMCG_SWAP_FAIL;
1671 
1672 	/* For now only MEMCG_MAX can happen with !allow_spinning context. */
1673 	VM_WARN_ON_ONCE(!allow_spinning && event != MEMCG_MAX);
1674 
1675 	atomic_long_inc(&memcg->memory_events_local[event]);
1676 	if (!swap_event && allow_spinning)
1677 		cgroup_file_notify(&memcg->events_local_file);
1678 
1679 	do {
1680 		atomic_long_inc(&memcg->memory_events[event]);
1681 		if (allow_spinning) {
1682 			if (swap_event)
1683 				cgroup_file_notify(&memcg->swap_events_file);
1684 			else
1685 				cgroup_file_notify(&memcg->events_file);
1686 		}
1687 
1688 		if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
1689 			break;
1690 		if (cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_LOCAL_EVENTS)
1691 			break;
1692 	} while ((memcg = parent_mem_cgroup(memcg)) &&
1693 		 !mem_cgroup_is_root(memcg));
1694 }
1695 EXPORT_SYMBOL_GPL(__memcg_memory_event);
1696 
1697 static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
1698 				     int order)
1699 {
1700 	struct oom_control oc = {
1701 		.zonelist = NULL,
1702 		.nodemask = NULL,
1703 		.memcg = memcg,
1704 		.gfp_mask = gfp_mask,
1705 		.order = order,
1706 	};
1707 	bool ret = true;
1708 
1709 	if (mutex_lock_killable(&oom_lock))
1710 		return true;
1711 
1712 	if (mem_cgroup_margin(memcg) >= (1 << order))
1713 		goto unlock;
1714 
1715 	/*
1716 	 * A few threads which were not waiting at mutex_lock_killable() can
1717 	 * fail to bail out. Therefore, check again after holding oom_lock.
1718 	 */
1719 	ret = out_of_memory(&oc);
1720 
1721 unlock:
1722 	mutex_unlock(&oom_lock);
1723 	return ret;
1724 }
1725 
1726 /*
1727  * Returns true if one or more processes were successfully killed. In some
1728  * corner cases it can return true even without killing any process.
1729  */
1730 static bool mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
1731 {
1732 	bool locked, ret;
1733 
1734 	if (order > PAGE_ALLOC_COSTLY_ORDER)
1735 		return false;
1736 
1737 	memcg_memory_event(memcg, MEMCG_OOM);
1738 
1739 	if (!memcg1_oom_prepare(memcg, &locked))
1740 		return false;
1741 
1742 	ret = mem_cgroup_out_of_memory(memcg, mask, order);
1743 
1744 	memcg1_oom_finish(memcg, locked);
1745 
1746 	return ret;
1747 }
1748 
1749 /**
1750  * mem_cgroup_get_oom_group - get a memory cgroup to clean up after OOM
1751  * @victim: task to be killed by the OOM killer
1752  * @oom_domain: memcg in case of memcg OOM, NULL in case of system-wide OOM
1753  *
1754  * Returns a pointer to a memory cgroup, which has to be cleaned up
1755  * by killing all belonging OOM-killable tasks.
1756  *
1757  * Caller has to call mem_cgroup_put() on the returned non-NULL memcg.
1758  */
1759 struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim,
1760 					    struct mem_cgroup *oom_domain)
1761 {
1762 	struct mem_cgroup *oom_group = NULL;
1763 	struct mem_cgroup *memcg;
1764 
1765 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
1766 		return NULL;
1767 
1768 	if (!oom_domain)
1769 		oom_domain = root_mem_cgroup;
1770 
1771 	rcu_read_lock();
1772 
1773 	memcg = mem_cgroup_from_task(victim);
1774 	if (mem_cgroup_is_root(memcg))
1775 		goto out;
1776 
1777 	/*
1778 	 * If the victim task has been asynchronously moved to a different
1779 	 * memory cgroup, we might end up killing tasks outside oom_domain.
1780 	 * In this case it's better to ignore memory.oom.group.
1781 	 */
1782 	if (unlikely(!mem_cgroup_is_descendant(memcg, oom_domain)))
1783 		goto out;
1784 
1785 	/*
1786 	 * Traverse the memory cgroup hierarchy from the victim task's
1787 	 * cgroup up to the OOMing cgroup (or root) to find the
1788 	 * highest-level memory cgroup with oom.group set.
1789 	 */
1790 	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
1791 		if (READ_ONCE(memcg->oom_group))
1792 			oom_group = memcg;
1793 
1794 		if (memcg == oom_domain)
1795 			break;
1796 	}
1797 
1798 	if (oom_group)
1799 		css_get(&oom_group->css);
1800 out:
1801 	rcu_read_unlock();
1802 
1803 	return oom_group;
1804 }
1805 
1806 void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
1807 {
1808 	pr_info("Tasks in ");
1809 	pr_cont_cgroup_path(memcg->css.cgroup);
1810 	pr_cont(" are going to be killed due to memory.oom.group set\n");
1811 }
1812 
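/*
 * Usage sketch (hypothetical, not part of this file): how an OOM-kill path
 * might use the group lookup; example_kill_one() is an invented callback.
 *
 *	struct mem_cgroup *group;
 *
 *	group = mem_cgroup_get_oom_group(victim, oom_domain);
 *	if (group) {
 *		mem_cgroup_print_oom_group(group);
 *		mem_cgroup_scan_tasks(group, example_kill_one, NULL);
 *		mem_cgroup_put(group);
 *	}
 */
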
1813 /*
1814  * The value of NR_MEMCG_STOCK is selected to keep the cached memcgs and their
1815  * nr_pages in a single cacheline. This may change in the future.
1816  */
1817 #define NR_MEMCG_STOCK 7
1818 #define FLUSHING_CACHED_CHARGE	0
1819 struct memcg_stock_pcp {
1820 	local_trylock_t lock;
1821 	uint8_t nr_pages[NR_MEMCG_STOCK];
1822 	struct mem_cgroup *cached[NR_MEMCG_STOCK];
1823 
1824 	struct work_struct work;
1825 	unsigned long flags;
1826 };
1827 
1828 static DEFINE_PER_CPU_ALIGNED(struct memcg_stock_pcp, memcg_stock) = {
1829 	.lock = INIT_LOCAL_TRYLOCK(lock),
1830 };
1831 
1832 struct obj_stock_pcp {
1833 	local_trylock_t lock;
1834 	unsigned int nr_bytes;
1835 	struct obj_cgroup *cached_objcg;
1836 	struct pglist_data *cached_pgdat;
1837 	int nr_slab_reclaimable_b;
1838 	int nr_slab_unreclaimable_b;
1839 
1840 	struct work_struct work;
1841 	unsigned long flags;
1842 };
1843 
1844 static DEFINE_PER_CPU_ALIGNED(struct obj_stock_pcp, obj_stock) = {
1845 	.lock = INIT_LOCAL_TRYLOCK(lock),
1846 };
1847 
1848 static DEFINE_MUTEX(percpu_charge_mutex);
1849 
1850 static void drain_obj_stock(struct obj_stock_pcp *stock);
1851 static bool obj_stock_flush_required(struct obj_stock_pcp *stock,
1852 				     struct mem_cgroup *root_memcg);
1853 
1854 /**
1855  * consume_stock: Try to consume stocked charge on this cpu.
1856  * @memcg: memcg to consume from.
1857  * @nr_pages: how many pages to charge.
1858  *
1859  * Consume the cached charge if enough nr_pages are present; otherwise return
1860  * failure. Also return failure for a charge request larger than
1861  * MEMCG_CHARGE_BATCH or if the local lock is already taken.
1862  *
1863  * Returns true if successful, false otherwise.
1864  */
1865 static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
1866 {
1867 	struct memcg_stock_pcp *stock;
1868 	uint8_t stock_pages;
1869 	bool ret = false;
1870 	int i;
1871 
1872 	if (nr_pages > MEMCG_CHARGE_BATCH ||
1873 	    !local_trylock(&memcg_stock.lock))
1874 		return ret;
1875 
1876 	stock = this_cpu_ptr(&memcg_stock);
1877 
1878 	for (i = 0; i < NR_MEMCG_STOCK; ++i) {
1879 		if (memcg != READ_ONCE(stock->cached[i]))
1880 			continue;
1881 
1882 		stock_pages = READ_ONCE(stock->nr_pages[i]);
1883 		if (stock_pages >= nr_pages) {
1884 			WRITE_ONCE(stock->nr_pages[i], stock_pages - nr_pages);
1885 			ret = true;
1886 		}
1887 		break;
1888 	}
1889 
1890 	local_unlock(&memcg_stock.lock);
1891 
1892 	return ret;
1893 }
1894 
1895 static void memcg_uncharge(struct mem_cgroup *memcg, unsigned int nr_pages)
1896 {
1897 	page_counter_uncharge(&memcg->memory, nr_pages);
1898 	if (do_memsw_account())
1899 		page_counter_uncharge(&memcg->memsw, nr_pages);
1900 }
1901 
1902 /*
1903  * Uncharge the stock cached in the given percpu slot and reset its cached state.
1904  */
1905 static void drain_stock(struct memcg_stock_pcp *stock, int i)
1906 {
1907 	struct mem_cgroup *old = READ_ONCE(stock->cached[i]);
1908 	uint8_t stock_pages;
1909 
1910 	if (!old)
1911 		return;
1912 
1913 	stock_pages = READ_ONCE(stock->nr_pages[i]);
1914 	if (stock_pages) {
1915 		memcg_uncharge(old, stock_pages);
1916 		WRITE_ONCE(stock->nr_pages[i], 0);
1917 	}
1918 
1919 	css_put(&old->css);
1920 	WRITE_ONCE(stock->cached[i], NULL);
1921 }
1922 
1923 static void drain_stock_fully(struct memcg_stock_pcp *stock)
1924 {
1925 	int i;
1926 
1927 	for (i = 0; i < NR_MEMCG_STOCK; ++i)
1928 		drain_stock(stock, i);
1929 }
1930 
1931 static void drain_local_memcg_stock(struct work_struct *dummy)
1932 {
1933 	struct memcg_stock_pcp *stock;
1934 
1935 	if (WARN_ONCE(!in_task(), "drain in non-task context"))
1936 		return;
1937 
1938 	local_lock(&memcg_stock.lock);
1939 
1940 	stock = this_cpu_ptr(&memcg_stock);
1941 	drain_stock_fully(stock);
1942 	clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
1943 
1944 	local_unlock(&memcg_stock.lock);
1945 }
1946 
1947 static void drain_local_obj_stock(struct work_struct *dummy)
1948 {
1949 	struct obj_stock_pcp *stock;
1950 
1951 	if (WARN_ONCE(!in_task(), "drain in non-task context"))
1952 		return;
1953 
1954 	local_lock(&obj_stock.lock);
1955 
1956 	stock = this_cpu_ptr(&obj_stock);
1957 	drain_obj_stock(stock);
1958 	clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
1959 
1960 	local_unlock(&obj_stock.lock);
1961 }
1962 
1963 static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
1964 {
1965 	struct memcg_stock_pcp *stock;
1966 	struct mem_cgroup *cached;
1967 	uint8_t stock_pages;
1968 	bool success = false;
1969 	int empty_slot = -1;
1970 	int i;
1971 
1972 	/*
1973 	 * For now, limit MEMCG_CHARGE_BATCH to 127 or less. If we ever decide
1974 	 * to increase it beyond 127, we will need more careful handling of
1975 	 * nr_pages[] in struct memcg_stock_pcp.
1976 	 */
1977 	BUILD_BUG_ON(MEMCG_CHARGE_BATCH > S8_MAX);
1978 
1979 	VM_WARN_ON_ONCE(mem_cgroup_is_root(memcg));
1980 
1981 	if (nr_pages > MEMCG_CHARGE_BATCH ||
1982 	    !local_trylock(&memcg_stock.lock)) {
1983 		/*
1984 		 * In case of larger than batch refill or unlikely failure to
1985 		 * lock the percpu memcg_stock.lock, uncharge memcg directly.
1986 		 */
1987 		memcg_uncharge(memcg, nr_pages);
1988 		return;
1989 	}
1990 
1991 	stock = this_cpu_ptr(&memcg_stock);
1992 	for (i = 0; i < NR_MEMCG_STOCK; ++i) {
1993 		cached = READ_ONCE(stock->cached[i]);
1994 		if (!cached && empty_slot == -1)
1995 			empty_slot = i;
1996 		if (memcg == READ_ONCE(stock->cached[i])) {
1997 			stock_pages = READ_ONCE(stock->nr_pages[i]) + nr_pages;
1998 			WRITE_ONCE(stock->nr_pages[i], stock_pages);
1999 			if (stock_pages > MEMCG_CHARGE_BATCH)
2000 				drain_stock(stock, i);
2001 			success = true;
2002 			break;
2003 		}
2004 	}
2005 
2006 	if (!success) {
2007 		i = empty_slot;
2008 		if (i == -1) {
2009 			i = get_random_u32_below(NR_MEMCG_STOCK);
2010 			drain_stock(stock, i);
2011 		}
2012 		css_get(&memcg->css);
2013 		WRITE_ONCE(stock->cached[i], memcg);
2014 		WRITE_ONCE(stock->nr_pages[i], nr_pages);
2015 	}
2016 
2017 	local_unlock(&memcg_stock.lock);
2018 }
2019 
2020 static bool is_memcg_drain_needed(struct memcg_stock_pcp *stock,
2021 				  struct mem_cgroup *root_memcg)
2022 {
2023 	struct mem_cgroup *memcg;
2024 	bool flush = false;
2025 	int i;
2026 
2027 	rcu_read_lock();
2028 	for (i = 0; i < NR_MEMCG_STOCK; ++i) {
2029 		memcg = READ_ONCE(stock->cached[i]);
2030 		if (!memcg)
2031 			continue;
2032 
2033 		if (READ_ONCE(stock->nr_pages[i]) &&
2034 		    mem_cgroup_is_descendant(memcg, root_memcg)) {
2035 			flush = true;
2036 			break;
2037 		}
2038 	}
2039 	rcu_read_unlock();
2040 	return flush;
2041 }
2042 
2043 static void schedule_drain_work(int cpu, struct work_struct *work)
2044 {
2045 	/*
2046 	 * Protect housekeeping cpumask read and work enqueue together
2047 	 * in the same RCU critical section so that a later cpuset isolated
2048 	 * partition update only needs to wait for an RCU GP and flush the
2049 	 * pending work on newly isolated CPUs.
2050 	 */
2051 	guard(rcu)();
2052 	if (!cpu_is_isolated(cpu))
2053 		queue_work_on(cpu, memcg_wq, work);
2054 }
2055 
2056 /*
2057  * Drain all per-CPU charge caches for the given root_memcg and the
2058  * subtree of the hierarchy under it.
2059  */
2060 void drain_all_stock(struct mem_cgroup *root_memcg)
2061 {
2062 	int cpu, curcpu;
2063 
2064 	/* If someone's already draining, avoid running more workers. */
2065 	if (!mutex_trylock(&percpu_charge_mutex))
2066 		return;
2067 	/*
2068 	 * Notify other CPUs that a system-wide "drain" is running.
2069 	 * We do not care about races with CPU hotplug because CPU down
2070 	 * as well as workers from this path always operate on the local
2071 	 * per-cpu data. CPU up doesn't touch memcg_stock at all.
2072 	 */
2073 	migrate_disable();
2074 	curcpu = smp_processor_id();
2075 	for_each_online_cpu(cpu) {
2076 		struct memcg_stock_pcp *memcg_st = &per_cpu(memcg_stock, cpu);
2077 		struct obj_stock_pcp *obj_st = &per_cpu(obj_stock, cpu);
2078 
2079 		if (!test_bit(FLUSHING_CACHED_CHARGE, &memcg_st->flags) &&
2080 		    is_memcg_drain_needed(memcg_st, root_memcg) &&
2081 		    !test_and_set_bit(FLUSHING_CACHED_CHARGE,
2082 				      &memcg_st->flags)) {
2083 			if (cpu == curcpu)
2084 				drain_local_memcg_stock(&memcg_st->work);
2085 			else
2086 				schedule_drain_work(cpu, &memcg_st->work);
2087 		}
2088 
2089 		if (!test_bit(FLUSHING_CACHED_CHARGE, &obj_st->flags) &&
2090 		    obj_stock_flush_required(obj_st, root_memcg) &&
2091 		    !test_and_set_bit(FLUSHING_CACHED_CHARGE,
2092 				      &obj_st->flags)) {
2093 			if (cpu == curcpu)
2094 				drain_local_obj_stock(&obj_st->work);
2095 			else
2096 				schedule_drain_work(cpu, &obj_st->work);
2097 		}
2098 	}
2099 	migrate_enable();
2100 	mutex_unlock(&percpu_charge_mutex);
2101 }
2102 
2103 static int memcg_hotplug_cpu_dead(unsigned int cpu)
2104 {
2105 	/* no need for the local lock */
2106 	drain_obj_stock(&per_cpu(obj_stock, cpu));
2107 	drain_stock_fully(&per_cpu(memcg_stock, cpu));
2108 
2109 	return 0;
2110 }
2111 
2112 static unsigned long reclaim_high(struct mem_cgroup *memcg,
2113 				  unsigned int nr_pages,
2114 				  gfp_t gfp_mask)
2115 {
2116 	unsigned long nr_reclaimed = 0;
2117 
2118 	do {
2119 		unsigned long pflags;
2120 
2121 		if (page_counter_read(&memcg->memory) <=
2122 		    READ_ONCE(memcg->memory.high))
2123 			continue;
2124 
2125 		memcg_memory_event(memcg, MEMCG_HIGH);
2126 
2127 		psi_memstall_enter(&pflags);
2128 		nr_reclaimed += try_to_free_mem_cgroup_pages(memcg, nr_pages,
2129 							gfp_mask,
2130 							MEMCG_RECLAIM_MAY_SWAP,
2131 							NULL);
2132 		psi_memstall_leave(&pflags);
2133 	} while ((memcg = parent_mem_cgroup(memcg)) &&
2134 		 !mem_cgroup_is_root(memcg));
2135 
2136 	return nr_reclaimed;
2137 }
2138 
2139 static void high_work_func(struct work_struct *work)
2140 {
2141 	struct mem_cgroup *memcg;
2142 
2143 	memcg = container_of(work, struct mem_cgroup, high_work);
2144 	reclaim_high(memcg, MEMCG_CHARGE_BATCH, GFP_KERNEL);
2145 }
2146 
2147 /*
2148  * Clamp the maximum sleep time per allocation batch to 2 seconds. This is
2149  * enough to cause a significant slowdown in most cases, while still
2150  * allowing diagnostics and tracing to proceed without becoming stuck.
2151  */
2152 #define MEMCG_MAX_HIGH_DELAY_JIFFIES (2UL*HZ)
2153 
2154 /*
2155  * When calculating the delay, we use these on either side of the exponentiation
2156  * to maintain precision and scale to a reasonable number of jiffies (see the
2157  * table below).
2158  *
2159  * - MEMCG_DELAY_PRECISION_SHIFT: Extra precision bits while translating the
2160  *   overage ratio to a delay.
2161  * - MEMCG_DELAY_SCALING_SHIFT: The number of bits to scale down the
2162  *   proposed penalty in order to reduce to a reasonable number of jiffies, and
2163  *   to produce a reasonable delay curve.
2164  *
2165  * MEMCG_DELAY_SCALING_SHIFT just happens to be a number that produces a
2166  * reasonable delay curve compared to precision-adjusted overage, not
2167  * penalising heavily at first, but still making sure that growth beyond the
2168  * limit penalises misbehaving cgroups by slowing them down exponentially. For
2169  * example, with a high of 100 megabytes:
2170  *
2171  *  +-------+------------------------+
2172  *  | usage | time to allocate in ms |
2173  *  +-------+------------------------+
2174  *  | 100M  |                      0 |
2175  *  | 101M  |                      6 |
2176  *  | 102M  |                     25 |
2177  *  | 103M  |                     57 |
2178  *  | 104M  |                    102 |
2179  *  | 105M  |                    159 |
2180  *  | 106M  |                    230 |
2181  *  | 107M  |                    313 |
2182  *  | 108M  |                    409 |
2183  *  | 109M  |                    518 |
2184  *  | 110M  |                    639 |
2185  *  | 111M  |                    774 |
2186  *  | 112M  |                    921 |
2187  *  | 113M  |                   1081 |
2188  *  | 114M  |                   1254 |
2189  *  | 115M  |                   1439 |
2190  *  | 116M  |                   1638 |
2191  *  | 117M  |                   1849 |
2192  *  | 118M  |                   2000 |
2193  *  | 119M  |                   2000 |
2194  *  | 120M  |                   2000 |
2195  *  +-------+------------------------+
2196  */
2197  #define MEMCG_DELAY_PRECISION_SHIFT 20
2198  #define MEMCG_DELAY_SCALING_SHIFT 14
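
/*
 * Worked example for one row of the table above (editorial addition),
 * assuming 4K pages, HZ == 1000 and the formula used by
 * calculate_high_delay() below: with memory.high = 100M (25600 pages) and
 * usage = 110M (28160 pages),
 *
 *	overage = ((28160 - 25600) << 20) / 25600              ~= 104857
 *	penalty = overage * overage * HZ >> (20 + 14)          ~= 640 jiffies
 *
 * which corresponds to the ~639 ms row (the small difference is integer
 * truncation), before the per-task scaling by nr_pages / MEMCG_CHARGE_BATCH
 * and the MEMCG_MAX_HIGH_DELAY_JIFFIES clamp are applied.
 */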
2199 
2200 static u64 calculate_overage(unsigned long usage, unsigned long high)
2201 {
2202 	u64 overage;
2203 
2204 	if (usage <= high)
2205 		return 0;
2206 
2207 	/*
2208 	 * Prevent division by 0 in overage calculation by acting as if
2209 	 * it was a threshold of 1 page
2210 	 */
2211 	high = max(high, 1UL);
2212 
2213 	overage = usage - high;
2214 	overage <<= MEMCG_DELAY_PRECISION_SHIFT;
2215 	return div64_u64(overage, high);
2216 }
2217 
2218 static u64 mem_find_max_overage(struct mem_cgroup *memcg)
2219 {
2220 	u64 overage, max_overage = 0;
2221 
2222 	do {
2223 		overage = calculate_overage(page_counter_read(&memcg->memory),
2224 					    READ_ONCE(memcg->memory.high));
2225 		max_overage = max(overage, max_overage);
2226 	} while ((memcg = parent_mem_cgroup(memcg)) &&
2227 		 !mem_cgroup_is_root(memcg));
2228 
2229 	return max_overage;
2230 }
2231 
2232 static u64 swap_find_max_overage(struct mem_cgroup *memcg)
2233 {
2234 	u64 overage, max_overage = 0;
2235 
2236 	do {
2237 		overage = calculate_overage(page_counter_read(&memcg->swap),
2238 					    READ_ONCE(memcg->swap.high));
2239 		if (overage)
2240 			memcg_memory_event(memcg, MEMCG_SWAP_HIGH);
2241 		max_overage = max(overage, max_overage);
2242 	} while ((memcg = parent_mem_cgroup(memcg)) &&
2243 		 !mem_cgroup_is_root(memcg));
2244 
2245 	return max_overage;
2246 }
2247 
2248 /*
2249  * Get the number of jiffies that we should penalise a mischievous cgroup which
2250  * is exceeding its memory.high by checking both it and its ancestors.
2251  */
2252 static unsigned long calculate_high_delay(struct mem_cgroup *memcg,
2253 					  unsigned int nr_pages,
2254 					  u64 max_overage)
2255 {
2256 	unsigned long penalty_jiffies;
2257 
2258 	if (!max_overage)
2259 		return 0;
2260 
2261 	/*
2262 	 * We use overage compared to memory.high to calculate the number of
2263 	 * jiffies to sleep (penalty_jiffies). Ideally this value should be
2264 	 * fairly lenient on small overages, and increasingly harsh when the
2265 	 * memcg in question makes it clear that it has no intention of stopping
2266 	 * its crazy behaviour, so we exponentially increase the delay based on
2267 	 * overage amount.
2268 	 */
2269 	penalty_jiffies = max_overage * max_overage * HZ;
2270 	penalty_jiffies >>= MEMCG_DELAY_PRECISION_SHIFT;
2271 	penalty_jiffies >>= MEMCG_DELAY_SCALING_SHIFT;
2272 
2273 	/*
2274 	 * Factor in the task's own contribution to the overage, such that four
2275 	 * N-sized allocations are throttled approximately the same as one
2276 	 * 4N-sized allocation.
2277 	 *
2278 	 * MEMCG_CHARGE_BATCH pages is nominal, so work out how much smaller or
2279 	 * larger the current charge batch is than that.
2280 	 */
2281 	return penalty_jiffies * nr_pages / MEMCG_CHARGE_BATCH;
2282 }
2283 
2284 /*
2285  * Reclaims memory over the high limit. Called directly from
2286  * try_charge() (context permitting), as well as from the userland
2287  * return path where reclaim is always able to block.
2288  */
2289 void __mem_cgroup_handle_over_high(gfp_t gfp_mask)
2290 {
2291 	unsigned long penalty_jiffies;
2292 	unsigned long pflags;
2293 	unsigned long nr_reclaimed;
2294 	unsigned int nr_pages = current->memcg_nr_pages_over_high;
2295 	int nr_retries = MAX_RECLAIM_RETRIES;
2296 	struct mem_cgroup *memcg;
2297 	bool in_retry = false;
2298 
2299 	memcg = get_mem_cgroup_from_mm(current->mm);
2300 	current->memcg_nr_pages_over_high = 0;
2301 
2302 retry_reclaim:
2303 	/*
2304 	 * Bail if the task is already exiting. Unlike memory.max,
2305 	 * memory.high enforcement isn't as strict, and there is no
2306 	 * OOM killer involved, which means the excess could already
2307 	 * be much bigger (and still growing) than it could for
2308 	 * memory.max; the dying task could get stuck in fruitless
2309 	 * reclaim for a long time, which isn't desirable.
2310 	 */
2311 	if (task_is_dying())
2312 		goto out;
2313 
2314 	/*
2315 	 * The allocating task should reclaim at least the batch size, but for
2316 	 * subsequent retries we only want to do what's necessary to prevent oom
2317 	 * or breaching resource isolation.
2318 	 *
2319 	 * This is distinct from memory.max or page allocator behaviour because
2320 	 * memory.high is currently batched, whereas memory.max and the page
2321 	 * allocator run every time an allocation is made.
2322 	 */
2323 	nr_reclaimed = reclaim_high(memcg,
2324 				    in_retry ? SWAP_CLUSTER_MAX : nr_pages,
2325 				    gfp_mask);
2326 
2327 	/*
2328 	 * memory.high is breached and reclaim is unable to keep up. Throttle
2329 	 * allocators proactively to slow down excessive growth.
2330 	 */
2331 	penalty_jiffies = calculate_high_delay(memcg, nr_pages,
2332 					       mem_find_max_overage(memcg));
2333 
2334 	penalty_jiffies += calculate_high_delay(memcg, nr_pages,
2335 						swap_find_max_overage(memcg));
2336 
2337 	/*
2338 	 * Clamp the max delay per usermode return so as to still keep the
2339 	 * application moving forwards and also permit diagnostics, albeit
2340 	 * extremely slowly.
2341 	 */
2342 	penalty_jiffies = min(penalty_jiffies, MEMCG_MAX_HIGH_DELAY_JIFFIES);
2343 
2344 	/*
2345 	 * Don't sleep if the amount of jiffies this memcg owes us is so low
2346 	 * that it's not even worth doing, in an attempt to be nice to those who
2347 	 * go only a small amount over their memory.high value and maybe haven't
2348 	 * been aggressively reclaimed enough yet.
2349 	 */
2350 	if (penalty_jiffies <= HZ / 100)
2351 		goto out;
2352 
2353 	/*
2354 	 * If reclaim is making forward progress but we're still over
2355 	 * memory.high, we want to encourage that rather than doing allocator
2356 	 * throttling.
2357 	 */
2358 	if (nr_reclaimed || nr_retries--) {
2359 		in_retry = true;
2360 		goto retry_reclaim;
2361 	}
2362 
2363 	/*
2364 	 * Reclaim didn't manage to push usage below the limit, slow
2365 	 * this allocating task down.
2366 	 *
2367 	 * If we exit early, we're guaranteed to die (since
2368 	 * schedule_timeout_killable sets TASK_KILLABLE). This means we don't
2369 	 * need to account for any ill-begotten jiffies to pay them off later.
2370 	 */
2371 	psi_memstall_enter(&pflags);
2372 	schedule_timeout_killable(penalty_jiffies);
2373 	psi_memstall_leave(&pflags);
2374 
2375 out:
2376 	css_put(&memcg->css);
2377 }
2378 
2379 static int try_charge_memcg(struct mem_cgroup *memcg, gfp_t gfp_mask,
2380 			    unsigned int nr_pages)
2381 {
2382 	unsigned int batch = max(MEMCG_CHARGE_BATCH, nr_pages);
2383 	int nr_retries = MAX_RECLAIM_RETRIES;
2384 	struct mem_cgroup *mem_over_limit;
2385 	struct page_counter *counter;
2386 	unsigned long nr_reclaimed;
2387 	bool passed_oom = false;
2388 	unsigned int reclaim_options = MEMCG_RECLAIM_MAY_SWAP;
2389 	bool drained = false;
2390 	bool raised_max_event = false;
2391 	unsigned long pflags;
2392 	bool allow_spinning = gfpflags_allow_spinning(gfp_mask);
2393 
2394 retry:
2395 	if (consume_stock(memcg, nr_pages))
2396 		return 0;
2397 
2398 	if (!allow_spinning)
2399 		/* Avoid the refill and flush of the older stock */
2400 		batch = nr_pages;
2401 
2402 	if (!do_memsw_account() ||
2403 	    page_counter_try_charge(&memcg->memsw, batch, &counter)) {
2404 		if (page_counter_try_charge(&memcg->memory, batch, &counter))
2405 			goto done_restock;
2406 		if (do_memsw_account())
2407 			page_counter_uncharge(&memcg->memsw, batch);
2408 		mem_over_limit = mem_cgroup_from_counter(counter, memory);
2409 	} else {
2410 		mem_over_limit = mem_cgroup_from_counter(counter, memsw);
2411 		reclaim_options &= ~MEMCG_RECLAIM_MAY_SWAP;
2412 	}
2413 
2414 	if (batch > nr_pages) {
2415 		batch = nr_pages;
2416 		goto retry;
2417 	}
2418 
2419 	/*
2420 	 * Prevent unbounded recursion when reclaim operations need to
2421 	 * allocate memory. This might exceed the limits temporarily,
2422 	 * but we prefer facilitating memory reclaim and getting back
2423 	 * under the limit over triggering OOM kills in these cases.
2424 	 */
2425 	if (unlikely(current->flags & PF_MEMALLOC))
2426 		goto force;
2427 
2428 	if (unlikely(task_in_memcg_oom(current)))
2429 		goto nomem;
2430 
2431 	if (!gfpflags_allow_blocking(gfp_mask))
2432 		goto nomem;
2433 
2434 	__memcg_memory_event(mem_over_limit, MEMCG_MAX, allow_spinning);
2435 	raised_max_event = true;
2436 
2437 	psi_memstall_enter(&pflags);
2438 	nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages,
2439 						    gfp_mask, reclaim_options, NULL);
2440 	psi_memstall_leave(&pflags);
2441 
2442 	if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
2443 		goto retry;
2444 
2445 	if (!drained) {
2446 		drain_all_stock(mem_over_limit);
2447 		drained = true;
2448 		goto retry;
2449 	}
2450 
2451 	if (gfp_mask & __GFP_NORETRY)
2452 		goto nomem;
2453 	/*
2454 	 * Even though the limit is exceeded at this point, reclaim
2455 	 * may have been able to free some pages.  Retry the charge
2456 	 * before killing the task.
2457 	 *
2458 	 * Only for regular pages, though: huge pages are rather
2459 	 * unlikely to succeed so close to the limit, and we fall back
2460 	 * to regular pages anyway in case of failure.
2461 	 */
2462 	if (nr_reclaimed && nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER))
2463 		goto retry;
2464 
2465 	if (nr_retries--)
2466 		goto retry;
2467 
2468 	if (gfp_mask & __GFP_RETRY_MAYFAIL)
2469 		goto nomem;
2470 
2471 	/* Avoid endless loop for tasks bypassed by the oom killer */
2472 	if (passed_oom && task_is_dying())
2473 		goto nomem;
2474 
2475 	/*
2476 	 * keep retrying as long as the memcg oom killer is able to make
2477 	 * a forward progress or bypass the charge if the oom killer
2478 	 * couldn't make any progress.
2479 	 */
2480 	if (mem_cgroup_oom(mem_over_limit, gfp_mask,
2481 			   get_order(nr_pages * PAGE_SIZE))) {
2482 		passed_oom = true;
2483 		nr_retries = MAX_RECLAIM_RETRIES;
2484 		goto retry;
2485 	}
2486 nomem:
2487 	/*
2488 	 * Memcg doesn't have a dedicated reserve for atomic
2489 	 * allocations. But like the global atomic pool, we need to
2490 	 * put the burden of reclaim on regular allocation requests
2491 	 * and let these go through as privileged allocations.
2492 	 */
2493 	if (!(gfp_mask & (__GFP_NOFAIL | __GFP_HIGH)))
2494 		return -ENOMEM;
2495 force:
2496 	/*
2497 	 * If the allocation has to be enforced, don't forget to raise
2498 	 * a MEMCG_MAX event.
2499 	 */
2500 	if (!raised_max_event)
2501 		__memcg_memory_event(mem_over_limit, MEMCG_MAX, allow_spinning);
2502 
2503 	/*
2504 	 * The allocation either can't fail or will lead to more memory
2505 	 * being freed very soon.  Allow memory usage to go over the limit
2506 	 * temporarily by force charging it.
2507 	 */
2508 	page_counter_charge(&memcg->memory, nr_pages);
2509 	if (do_memsw_account())
2510 		page_counter_charge(&memcg->memsw, nr_pages);
2511 
2512 	return 0;
2513 
2514 done_restock:
2515 	if (batch > nr_pages)
2516 		refill_stock(memcg, batch - nr_pages);
2517 
2518 	/*
2519 	 * If the hierarchy is above the normal consumption range, schedule
2520 	 * reclaim on returning to userland.  We can perform reclaim here
2521 	 * if __GFP_RECLAIM but let's always punt for simplicity and so that
2522 	 * GFP_KERNEL can consistently be used during reclaim.  @memcg is
2523 	 * not recorded as it most likely matches current's and won't
2524 	 * change in the meantime.  As high limit is checked again before
2525 	 * reclaim, the cost of mismatch is negligible.
2526 	 */
2527 	do {
2528 		bool mem_high, swap_high;
2529 
2530 		mem_high = page_counter_read(&memcg->memory) >
2531 			READ_ONCE(memcg->memory.high);
2532 		swap_high = page_counter_read(&memcg->swap) >
2533 			READ_ONCE(memcg->swap.high);
2534 
2535 		/* Don't bother a random interrupted task */
2536 		if (!in_task()) {
2537 			if (mem_high) {
2538 				schedule_work(&memcg->high_work);
2539 				break;
2540 			}
2541 			continue;
2542 		}
2543 
2544 		if (mem_high || swap_high) {
2545 			/*
2546 			 * The allocating tasks in this cgroup will need to do
2547 			 * reclaim or be throttled to prevent further growth
2548 			 * of the memory or swap footprints.
2549 			 *
2550 			 * Target some best-effort fairness between the tasks,
2551 			 * and distribute reclaim work and delay penalties
2552 			 * based on how much each task is actually allocating.
2553 			 */
2554 			current->memcg_nr_pages_over_high += batch;
2555 			set_notify_resume(current);
2556 			break;
2557 		}
2558 	} while ((memcg = parent_mem_cgroup(memcg)));
2559 
2560 	/*
2561 	 * Reclaim is set up above to be called from the userland
2562 	 * return path. But also attempt synchronous reclaim to avoid
2563 	 * excessive overrun while the task is still inside the
2564 	 * kernel. If this is successful, the return path will see it
2565 	 * when it rechecks the overage and simply bail out.
2566 	 */
2567 	if (current->memcg_nr_pages_over_high > MEMCG_CHARGE_BATCH &&
2568 	    !(current->flags & PF_MEMALLOC) &&
2569 	    gfpflags_allow_blocking(gfp_mask))
2570 		__mem_cgroup_handle_over_high(gfp_mask);
2571 	return 0;
2572 }
2573 
2574 static inline int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
2575 			     unsigned int nr_pages)
2576 {
2577 	if (mem_cgroup_is_root(memcg))
2578 		return 0;
2579 
2580 	return try_charge_memcg(memcg, gfp_mask, nr_pages);
2581 }
2582 
2583 static void commit_charge(struct folio *folio, struct mem_cgroup *memcg)
2584 {
2585 	VM_BUG_ON_FOLIO(folio_memcg_charged(folio), folio);
2586 	/*
2587 	 * Any of the following ensures page's memcg stability:
2588 	 *
2589 	 * - the page lock
2590 	 * - LRU isolation
2591 	 * - exclusive reference
2592 	 */
2593 	folio->memcg_data = (unsigned long)memcg;
2594 }
2595 
2596 #ifdef CONFIG_MEMCG_NMI_SAFETY_REQUIRES_ATOMIC
2597 static inline void account_slab_nmi_safe(struct mem_cgroup *memcg,
2598 					 struct pglist_data *pgdat,
2599 					 enum node_stat_item idx, int nr)
2600 {
2601 	struct lruvec *lruvec;
2602 
2603 	if (likely(!in_nmi())) {
2604 		lruvec = mem_cgroup_lruvec(memcg, pgdat);
2605 		mod_memcg_lruvec_state(lruvec, idx, nr);
2606 	} else {
2607 		struct mem_cgroup_per_node *pn = memcg->nodeinfo[pgdat->node_id];
2608 
2609 		/* preemption is disabled in_nmi(). */
2610 		css_rstat_updated(&memcg->css, smp_processor_id());
2611 		if (idx == NR_SLAB_RECLAIMABLE_B)
2612 			atomic_add(nr, &pn->slab_reclaimable);
2613 		else
2614 			atomic_add(nr, &pn->slab_unreclaimable);
2615 	}
2616 }
2617 #else
2618 static inline void account_slab_nmi_safe(struct mem_cgroup *memcg,
2619 					 struct pglist_data *pgdat,
2620 					 enum node_stat_item idx, int nr)
2621 {
2622 	struct lruvec *lruvec;
2623 
2624 	lruvec = mem_cgroup_lruvec(memcg, pgdat);
2625 	mod_memcg_lruvec_state(lruvec, idx, nr);
2626 }
2627 #endif
2628 
2629 static inline void mod_objcg_mlstate(struct obj_cgroup *objcg,
2630 				       struct pglist_data *pgdat,
2631 				       enum node_stat_item idx, int nr)
2632 {
2633 	struct mem_cgroup *memcg;
2634 
2635 	rcu_read_lock();
2636 	memcg = obj_cgroup_memcg(objcg);
2637 	account_slab_nmi_safe(memcg, pgdat, idx, nr);
2638 	rcu_read_unlock();
2639 }
2640 
2641 static __always_inline
2642 struct mem_cgroup *mem_cgroup_from_obj_slab(struct slab *slab, void *p)
2643 {
2644 	/*
2645 	 * Slab objects are accounted individually, not per-page.
2646 	 * Memcg membership data for each individual object is saved in
2647 	 * slab->obj_exts.
2648 	 */
2649 	unsigned long obj_exts;
2650 	struct slabobj_ext *obj_ext;
2651 	unsigned int off;
2652 
2653 	obj_exts = slab_obj_exts(slab);
2654 	if (!obj_exts)
2655 		return NULL;
2656 
2657 	get_slab_obj_exts(obj_exts);
2658 	off = obj_to_index(slab->slab_cache, slab, p);
2659 	obj_ext = slab_obj_ext(slab, obj_exts, off);
2660 	if (obj_ext->objcg) {
2661 		struct obj_cgroup *objcg = obj_ext->objcg;
2662 
2663 		put_slab_obj_exts(obj_exts);
2664 		return obj_cgroup_memcg(objcg);
2665 	}
2666 	put_slab_obj_exts(obj_exts);
2667 
2668 	return NULL;
2669 }
2670 
2671 /*
2672  * Returns a pointer to the memory cgroup to which the kernel object is charged.
2673  * It is not suitable for objects allocated using vmalloc().
2674  *
2675  * A passed kernel object must be a slab object or a generic kernel page.
2676  *
2677  * The caller must ensure the memcg lifetime, e.g. by taking rcu_read_lock(),
2678  * cgroup_mutex, etc.
2679  */
2680 struct mem_cgroup *mem_cgroup_from_virt(void *p)
2681 {
2682 	struct slab *slab;
2683 
2684 	if (mem_cgroup_disabled())
2685 		return NULL;
2686 
2687 	slab = virt_to_slab(p);
2688 	if (slab)
2689 		return mem_cgroup_from_obj_slab(slab, p);
2690 	return folio_memcg_check(virt_to_folio(p));
2691 }
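
/*
 * Illustrative usage sketch (editorial addition, not part of this revision):
 * since the returned memcg is not referenced, a caller typically pins its
 * lifetime with RCU for the duration of the access; "ptr" is a hypothetical
 * slab object or kernel page address:
 *
 *	struct mem_cgroup *memcg;
 *
 *	rcu_read_lock();
 *	memcg = mem_cgroup_from_virt(ptr);
 *	if (memcg)
 *		... inspect memcg while still inside the RCU section ...
 *	rcu_read_unlock();
 */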
2692 
2693 static struct obj_cgroup *__get_obj_cgroup_from_memcg(struct mem_cgroup *memcg)
2694 {
2695 	struct obj_cgroup *objcg = NULL;
2696 
2697 	for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg)) {
2698 		objcg = rcu_dereference(memcg->objcg);
2699 		if (likely(objcg && obj_cgroup_tryget(objcg)))
2700 			break;
2701 		objcg = NULL;
2702 	}
2703 	return objcg;
2704 }
2705 
2706 static struct obj_cgroup *current_objcg_update(void)
2707 {
2708 	struct mem_cgroup *memcg;
2709 	struct obj_cgroup *old, *objcg = NULL;
2710 
2711 	do {
2712 		/* Atomically drop the update bit. */
2713 		old = xchg(&current->objcg, NULL);
2714 		if (old) {
2715 			old = (struct obj_cgroup *)
2716 				((unsigned long)old & ~CURRENT_OBJCG_UPDATE_FLAG);
2717 			obj_cgroup_put(old);
2718 
2719 			old = NULL;
2720 		}
2721 
2722 		/* If new objcg is NULL, no reason for the second atomic update. */
2723 		if (!current->mm || (current->flags & PF_KTHREAD))
2724 			return NULL;
2725 
2726 		/*
2727 		 * Release the objcg pointer from the previous iteration,
2728 		 * if try_cmpxchg() below fails.
2729 		 */
2730 		if (unlikely(objcg)) {
2731 			obj_cgroup_put(objcg);
2732 			objcg = NULL;
2733 		}
2734 
2735 		/*
2736 		 * Obtain the new objcg pointer. The current task can be
2737 		 * asynchronously moved to another memcg and the previous
2738 		 * memcg can be offlined. So let's get the memcg pointer
2739 		 * and try get a reference to objcg under a rcu read lock.
2740 		 */
2741 
2742 		rcu_read_lock();
2743 		memcg = mem_cgroup_from_task(current);
2744 		objcg = __get_obj_cgroup_from_memcg(memcg);
2745 		rcu_read_unlock();
2746 
2747 		/*
2748 		 * Try to set up a new objcg pointer atomically. If it
2749 		 * fails, it means the update flag was set concurrently, so
2750 		 * the whole procedure should be repeated.
2751 		 */
2752 	} while (!try_cmpxchg(&current->objcg, &old, objcg));
2753 
2754 	return objcg;
2755 }
2756 
2757 __always_inline struct obj_cgroup *current_obj_cgroup(void)
2758 {
2759 	struct mem_cgroup *memcg;
2760 	struct obj_cgroup *objcg;
2761 
2762 	if (IS_ENABLED(CONFIG_MEMCG_NMI_UNSAFE) && in_nmi())
2763 		return NULL;
2764 
2765 	if (in_task()) {
2766 		memcg = current->active_memcg;
2767 		if (unlikely(memcg))
2768 			goto from_memcg;
2769 
2770 		objcg = READ_ONCE(current->objcg);
2771 		if (unlikely((unsigned long)objcg & CURRENT_OBJCG_UPDATE_FLAG))
2772 			objcg = current_objcg_update();
2773 		/*
2774 		 * The objcg reference is kept by the task, so it is safe
2775 		 * for the current task to use the objcg.
2776 		 */
2777 		return objcg;
2778 	}
2779 
2780 	memcg = this_cpu_read(int_active_memcg);
2781 	if (unlikely(memcg))
2782 		goto from_memcg;
2783 
2784 	return NULL;
2785 
2786 from_memcg:
2787 	objcg = NULL;
2788 	for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg)) {
2789 		/*
2790 		 * Memcg pointer is protected by scope (see set_active_memcg())
2791 		 * and is pinning the corresponding objcg, so objcg can't go
2792 		 * away and can be used within the scope without any additional
2793 		 * protection.
2794 		 */
2795 		objcg = rcu_dereference_check(memcg->objcg, 1);
2796 		if (likely(objcg))
2797 			break;
2798 	}
2799 
2800 	return objcg;
2801 }
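
/*
 * Illustrative usage sketch (editorial addition, not part of this revision):
 * the pointer returned by current_obj_cgroup() is only stable within the
 * current scope, so a caller that wants to hold on to it beyond that scope
 * takes its own reference:
 *
 *	struct obj_cgroup *objcg;
 *
 *	objcg = current_obj_cgroup();
 *	if (objcg)
 *		obj_cgroup_get(objcg);
 *	...
 *	if (objcg)
 *		obj_cgroup_put(objcg);
 */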
2802 
2803 struct obj_cgroup *get_obj_cgroup_from_folio(struct folio *folio)
2804 {
2805 	struct obj_cgroup *objcg;
2806 
2807 	if (!memcg_kmem_online())
2808 		return NULL;
2809 
2810 	if (folio_memcg_kmem(folio)) {
2811 		objcg = __folio_objcg(folio);
2812 		obj_cgroup_get(objcg);
2813 	} else {
2814 		struct mem_cgroup *memcg;
2815 
2816 		rcu_read_lock();
2817 		memcg = __folio_memcg(folio);
2818 		if (memcg)
2819 			objcg = __get_obj_cgroup_from_memcg(memcg);
2820 		else
2821 			objcg = NULL;
2822 		rcu_read_unlock();
2823 	}
2824 	return objcg;
2825 }
2826 
2827 #ifdef CONFIG_MEMCG_NMI_SAFETY_REQUIRES_ATOMIC
2828 static inline void account_kmem_nmi_safe(struct mem_cgroup *memcg, int val)
2829 {
2830 	if (likely(!in_nmi())) {
2831 		mod_memcg_state(memcg, MEMCG_KMEM, val);
2832 	} else {
2833 		/* preemption is disabled in_nmi(). */
2834 		css_rstat_updated(&memcg->css, smp_processor_id());
2835 		atomic_add(val, &memcg->kmem_stat);
2836 	}
2837 }
2838 #else
2839 static inline void account_kmem_nmi_safe(struct mem_cgroup *memcg, int val)
2840 {
2841 	mod_memcg_state(memcg, MEMCG_KMEM, val);
2842 }
2843 #endif
2844 
2845 /*
2846  * obj_cgroup_uncharge_pages: uncharge a number of kernel pages from an objcg
2847  * @objcg: object cgroup to uncharge
2848  * @nr_pages: number of pages to uncharge
2849  */
2850 static void obj_cgroup_uncharge_pages(struct obj_cgroup *objcg,
2851 				      unsigned int nr_pages)
2852 {
2853 	struct mem_cgroup *memcg;
2854 
2855 	memcg = get_mem_cgroup_from_objcg(objcg);
2856 
2857 	account_kmem_nmi_safe(memcg, -nr_pages);
2858 	memcg1_account_kmem(memcg, -nr_pages);
2859 	if (!mem_cgroup_is_root(memcg))
2860 		refill_stock(memcg, nr_pages);
2861 
2862 	css_put(&memcg->css);
2863 }
2864 
2865 /*
2866  * obj_cgroup_charge_pages: charge a number of kernel pages to an objcg
2867  * @objcg: object cgroup to charge
2868  * @gfp: reclaim mode
2869  * @nr_pages: number of pages to charge
2870  *
2871  * Returns 0 on success, an error code on failure.
2872  */
2873 static int obj_cgroup_charge_pages(struct obj_cgroup *objcg, gfp_t gfp,
2874 				   unsigned int nr_pages)
2875 {
2876 	struct mem_cgroup *memcg;
2877 	int ret;
2878 
2879 	memcg = get_mem_cgroup_from_objcg(objcg);
2880 
2881 	ret = try_charge_memcg(memcg, gfp, nr_pages);
2882 	if (ret)
2883 		goto out;
2884 
2885 	account_kmem_nmi_safe(memcg, nr_pages);
2886 	memcg1_account_kmem(memcg, nr_pages);
2887 out:
2888 	css_put(&memcg->css);
2889 
2890 	return ret;
2891 }
2892 
2893 static struct obj_cgroup *page_objcg(const struct page *page)
2894 {
2895 	unsigned long memcg_data = page->memcg_data;
2896 
2897 	if (mem_cgroup_disabled() || !memcg_data)
2898 		return NULL;
2899 
2900 	VM_BUG_ON_PAGE((memcg_data & OBJEXTS_FLAGS_MASK) != MEMCG_DATA_KMEM,
2901 			page);
2902 	return (struct obj_cgroup *)(memcg_data - MEMCG_DATA_KMEM);
2903 }
2904 
2905 static void page_set_objcg(struct page *page, const struct obj_cgroup *objcg)
2906 {
2907 	page->memcg_data = (unsigned long)objcg | MEMCG_DATA_KMEM;
2908 }
2909 
2910 /**
2911  * __memcg_kmem_charge_page: charge a kmem page to the current memory cgroup
2912  * @page: page to charge
2913  * @gfp: reclaim mode
2914  * @order: allocation order
2915  *
2916  * Returns 0 on success, an error code on failure.
2917  */
2918 int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order)
2919 {
2920 	struct obj_cgroup *objcg;
2921 	int ret = 0;
2922 
2923 	objcg = current_obj_cgroup();
2924 	if (objcg) {
2925 		ret = obj_cgroup_charge_pages(objcg, gfp, 1 << order);
2926 		if (!ret) {
2927 			obj_cgroup_get(objcg);
2928 			page_set_objcg(page, objcg);
2929 			return 0;
2930 		}
2931 	}
2932 	return ret;
2933 }
2934 
2935 /**
2936  * __memcg_kmem_uncharge_page: uncharge a kmem page
2937  * @page: page to uncharge
2938  * @order: allocation order
2939  */
2940 void __memcg_kmem_uncharge_page(struct page *page, int order)
2941 {
2942 	struct obj_cgroup *objcg = page_objcg(page);
2943 	unsigned int nr_pages = 1 << order;
2944 
2945 	if (!objcg)
2946 		return;
2947 
2948 	obj_cgroup_uncharge_pages(objcg, nr_pages);
2949 	page->memcg_data = 0;
2950 	obj_cgroup_put(objcg);
2951 }
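
/*
 * Illustrative usage sketch (editorial addition, not part of this revision):
 * these two hooks are meant to be paired per allocation, charging a kmem
 * page of a given order when it is allocated (e.g. for __GFP_ACCOUNT
 * requests) and uncharging it with the same order when it is freed:
 *
 *	if (memcg_kmem_online() &&
 *	    __memcg_kmem_charge_page(page, gfp, order))
 *		goto out_free;
 *	...
 *	__memcg_kmem_uncharge_page(page, order);
 */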
2952 
2953 static void __account_obj_stock(struct obj_cgroup *objcg,
2954 				struct obj_stock_pcp *stock, int nr,
2955 				struct pglist_data *pgdat, enum node_stat_item idx)
2956 {
2957 	int *bytes;
2958 
2959 	/*
2960 	 * Save vmstat data in stock and skip vmstat array update unless
2961 	 * accumulating over a page of vmstat data or when pgdat changes.
2962 	 */
2963 	if (stock->cached_pgdat != pgdat) {
2964 		/* Flush the existing cached vmstat data */
2965 		struct pglist_data *oldpg = stock->cached_pgdat;
2966 
2967 		if (stock->nr_slab_reclaimable_b) {
2968 			mod_objcg_mlstate(objcg, oldpg, NR_SLAB_RECLAIMABLE_B,
2969 					  stock->nr_slab_reclaimable_b);
2970 			stock->nr_slab_reclaimable_b = 0;
2971 		}
2972 		if (stock->nr_slab_unreclaimable_b) {
2973 			mod_objcg_mlstate(objcg, oldpg, NR_SLAB_UNRECLAIMABLE_B,
2974 					  stock->nr_slab_unreclaimable_b);
2975 			stock->nr_slab_unreclaimable_b = 0;
2976 		}
2977 		stock->cached_pgdat = pgdat;
2978 	}
2979 
2980 	bytes = (idx == NR_SLAB_RECLAIMABLE_B) ? &stock->nr_slab_reclaimable_b
2981 					       : &stock->nr_slab_unreclaimable_b;
2982 	/*
2983 	 * Even for a large object >= PAGE_SIZE, the vmstat data will still be
2984 	 * cached locally at least once before pushing it out.
2985 	 */
2986 	if (!*bytes) {
2987 		*bytes = nr;
2988 		nr = 0;
2989 	} else {
2990 		*bytes += nr;
2991 		if (abs(*bytes) > PAGE_SIZE) {
2992 			nr = *bytes;
2993 			*bytes = 0;
2994 		} else {
2995 			nr = 0;
2996 		}
2997 	}
2998 	if (nr)
2999 		mod_objcg_mlstate(objcg, pgdat, idx, nr);
3000 }
3001 
3002 static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes,
3003 			      struct pglist_data *pgdat, enum node_stat_item idx)
3004 {
3005 	struct obj_stock_pcp *stock;
3006 	bool ret = false;
3007 
3008 	if (!local_trylock(&obj_stock.lock))
3009 		return ret;
3010 
3011 	stock = this_cpu_ptr(&obj_stock);
3012 	if (objcg == READ_ONCE(stock->cached_objcg) && stock->nr_bytes >= nr_bytes) {
3013 		stock->nr_bytes -= nr_bytes;
3014 		ret = true;
3015 
3016 		if (pgdat)
3017 			__account_obj_stock(objcg, stock, nr_bytes, pgdat, idx);
3018 	}
3019 
3020 	local_unlock(&obj_stock.lock);
3021 
3022 	return ret;
3023 }
3024 
3025 static void drain_obj_stock(struct obj_stock_pcp *stock)
3026 {
3027 	struct obj_cgroup *old = READ_ONCE(stock->cached_objcg);
3028 
3029 	if (!old)
3030 		return;
3031 
3032 	if (stock->nr_bytes) {
3033 		unsigned int nr_pages = stock->nr_bytes >> PAGE_SHIFT;
3034 		unsigned int nr_bytes = stock->nr_bytes & (PAGE_SIZE - 1);
3035 
3036 		if (nr_pages) {
3037 			struct mem_cgroup *memcg;
3038 
3039 			memcg = get_mem_cgroup_from_objcg(old);
3040 
3041 			mod_memcg_state(memcg, MEMCG_KMEM, -nr_pages);
3042 			memcg1_account_kmem(memcg, -nr_pages);
3043 			if (!mem_cgroup_is_root(memcg))
3044 				memcg_uncharge(memcg, nr_pages);
3045 
3046 			css_put(&memcg->css);
3047 		}
3048 
3049 		/*
3050 		 * The leftover is flushed to the centralized per-memcg value.
3051 		 * On the next attempt to refill obj stock it will be moved
3052 		 * to a per-cpu stock (probably on another CPU), see
3053 		 * refill_obj_stock().
3054 		 *
3055 		 * How often it's flushed is a trade-off between the memory
3056 		 * limit enforcement accuracy and potential CPU contention,
3057 		 * so it might be changed in the future.
3058 		 */
3059 		atomic_add(nr_bytes, &old->nr_charged_bytes);
3060 		stock->nr_bytes = 0;
3061 	}
3062 
3063 	/*
3064 	 * Flush the vmstat data in current stock
3065 	 */
3066 	if (stock->nr_slab_reclaimable_b || stock->nr_slab_unreclaimable_b) {
3067 		if (stock->nr_slab_reclaimable_b) {
3068 			mod_objcg_mlstate(old, stock->cached_pgdat,
3069 					  NR_SLAB_RECLAIMABLE_B,
3070 					  stock->nr_slab_reclaimable_b);
3071 			stock->nr_slab_reclaimable_b = 0;
3072 		}
3073 		if (stock->nr_slab_unreclaimable_b) {
3074 			mod_objcg_mlstate(old, stock->cached_pgdat,
3075 					  NR_SLAB_UNRECLAIMABLE_B,
3076 					  stock->nr_slab_unreclaimable_b);
3077 			stock->nr_slab_unreclaimable_b = 0;
3078 		}
3079 		stock->cached_pgdat = NULL;
3080 	}
3081 
3082 	WRITE_ONCE(stock->cached_objcg, NULL);
3083 	obj_cgroup_put(old);
3084 }
3085 
3086 static bool obj_stock_flush_required(struct obj_stock_pcp *stock,
3087 				     struct mem_cgroup *root_memcg)
3088 {
3089 	struct obj_cgroup *objcg = READ_ONCE(stock->cached_objcg);
3090 	struct mem_cgroup *memcg;
3091 	bool flush = false;
3092 
3093 	rcu_read_lock();
3094 	if (objcg) {
3095 		memcg = obj_cgroup_memcg(objcg);
3096 		if (memcg && mem_cgroup_is_descendant(memcg, root_memcg))
3097 			flush = true;
3098 	}
3099 	rcu_read_unlock();
3100 
3101 	return flush;
3102 }
3103 
3104 static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes,
3105 		bool allow_uncharge, int nr_acct, struct pglist_data *pgdat,
3106 		enum node_stat_item idx)
3107 {
3108 	struct obj_stock_pcp *stock;
3109 	unsigned int nr_pages = 0;
3110 
3111 	if (!local_trylock(&obj_stock.lock)) {
3112 		if (pgdat)
3113 			mod_objcg_mlstate(objcg, pgdat, idx, nr_acct);
3114 		nr_pages = nr_bytes >> PAGE_SHIFT;
3115 		nr_bytes = nr_bytes & (PAGE_SIZE - 1);
3116 		atomic_add(nr_bytes, &objcg->nr_charged_bytes);
3117 		goto out;
3118 	}
3119 
3120 	stock = this_cpu_ptr(&obj_stock);
3121 	if (READ_ONCE(stock->cached_objcg) != objcg) { /* reset if necessary */
3122 		drain_obj_stock(stock);
3123 		obj_cgroup_get(objcg);
3124 		stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes)
3125 				? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0;
3126 		WRITE_ONCE(stock->cached_objcg, objcg);
3127 
3128 		allow_uncharge = true;	/* Allow uncharge when objcg changes */
3129 	}
3130 	stock->nr_bytes += nr_bytes;
3131 
3132 	if (pgdat)
3133 		__account_obj_stock(objcg, stock, nr_acct, pgdat, idx);
3134 
3135 	if (allow_uncharge && (stock->nr_bytes > PAGE_SIZE)) {
3136 		nr_pages = stock->nr_bytes >> PAGE_SHIFT;
3137 		stock->nr_bytes &= (PAGE_SIZE - 1);
3138 	}
3139 
3140 	local_unlock(&obj_stock.lock);
3141 out:
3142 	if (nr_pages)
3143 		obj_cgroup_uncharge_pages(objcg, nr_pages);
3144 }
3145 
3146 static int obj_cgroup_charge_account(struct obj_cgroup *objcg, gfp_t gfp, size_t size,
3147 				     struct pglist_data *pgdat, enum node_stat_item idx)
3148 {
3149 	unsigned int nr_pages, nr_bytes;
3150 	int ret;
3151 
3152 	if (likely(consume_obj_stock(objcg, size, pgdat, idx)))
3153 		return 0;
3154 
3155 	/*
3156 	 * In theory, objcg->nr_charged_bytes can have enough
3157 	 * pre-charged bytes to satisfy the allocation. However,
3158 	 * flushing objcg->nr_charged_bytes requires two atomic
3159 	 * operations, and objcg->nr_charged_bytes can't be big.
3160 	 * The shared objcg->nr_charged_bytes can also become a
3161 	 * performance bottleneck if all tasks of the same memcg are
3162 	 * trying to update it. So it's better to ignore it and try
3163 	 * grab some new pages. The stock's nr_bytes will be flushed to
3164 	 * objcg->nr_charged_bytes later on when objcg changes.
3165 	 *
3166 	 * The stock's nr_bytes may contain enough pre-charged bytes
3167 	 * to allow one less page to be charged, but we can't rely
3168 	 * on the pre-charged bytes not being changed outside of
3169 	 * consume_obj_stock() or refill_obj_stock(). So ignore those
3170 	 * pre-charged bytes as well when charging pages. To avoid a
3171 	 * page uncharge right after a page charge, we set the
3172 	 * allow_uncharge flag to false when calling refill_obj_stock()
3173 	 * to temporarily allow the pre-charged bytes to exceed the page
3174 	 * size limit. The maximum reachable value of the pre-charged
3175 	 * bytes is (sizeof(object) + PAGE_SIZE - 2) if there is no data
3176 	 * race.
3177 	 */
3178 	nr_pages = size >> PAGE_SHIFT;
3179 	nr_bytes = size & (PAGE_SIZE - 1);
3180 
3181 	if (nr_bytes)
3182 		nr_pages += 1;
3183 
3184 	ret = obj_cgroup_charge_pages(objcg, gfp, nr_pages);
3185 	if (!ret && (nr_bytes || pgdat))
3186 		refill_obj_stock(objcg, nr_bytes ? PAGE_SIZE - nr_bytes : 0,
3187 					 false, size, pgdat, idx);
3188 
3189 	return ret;
3190 }
3191 
3192 int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size)
3193 {
3194 	return obj_cgroup_charge_account(objcg, gfp, size, NULL, 0);
3195 }
3196 
3197 void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size)
3198 {
3199 	refill_obj_stock(objcg, size, true, 0, NULL, 0);
3200 }
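
/*
 * Illustrative usage sketch (editorial addition, not part of this revision):
 * obj_cgroup_charge() and obj_cgroup_uncharge() account sub-page amounts, so
 * a caller charging a hypothetical "sz"-byte object against an already
 * pinned objcg would pair them like this:
 *
 *	if (obj_cgroup_charge(objcg, gfp, sz))
 *		return -ENOMEM;
 *	...
 *	obj_cgroup_uncharge(objcg, sz);
 */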
3201 
3202 static inline size_t obj_full_size(struct kmem_cache *s)
3203 {
3204 	/*
3205 	 * For each accounted object there is an extra space which is used
3206 	 * to store obj_cgroup membership. Charge it too.
3207 	 */
3208 	return s->size + sizeof(struct obj_cgroup *);
3209 }
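
/*
 * Worked example (editorial addition), assuming a 64-bit kernel: for a cache
 * with s->size of 64 bytes, obj_full_size() yields
 * 64 + sizeof(struct obj_cgroup *) = 64 + 8 = 72 bytes charged per accounted
 * object, covering both the object itself and its obj_cgroup membership slot.
 */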
3210 
3211 bool __memcg_slab_post_alloc_hook(struct kmem_cache *s, struct list_lru *lru,
3212 				  gfp_t flags, size_t size, void **p)
3213 {
3214 	struct obj_cgroup *objcg;
3215 	struct slab *slab;
3216 	unsigned long off;
3217 	size_t i;
3218 
3219 	/*
3220 	 * The obtained objcg pointer is safe to use within the current scope,
3221 	 * defined by the current task or a set_active_memcg() pair.
3222 	 * obj_cgroup_get() is used to get a permanent reference.
3223 	 */
3224 	objcg = current_obj_cgroup();
3225 	if (!objcg)
3226 		return true;
3227 
3228 	/*
3229 	 * slab_alloc_node() avoids the NULL check, so we might be called with a
3230 	 * single NULL object. kmem_cache_alloc_bulk() aborts if it can't fill
3231 	 * the whole requested size.
3232 	 * return success as there's nothing to free back
3233 	 */
3234 	if (unlikely(*p == NULL))
3235 		return true;
3236 
3237 	flags &= gfp_allowed_mask;
3238 
3239 	if (lru) {
3240 		int ret;
3241 		struct mem_cgroup *memcg;
3242 
3243 		memcg = get_mem_cgroup_from_objcg(objcg);
3244 		ret = memcg_list_lru_alloc(memcg, lru, flags);
3245 		css_put(&memcg->css);
3246 
3247 		if (ret)
3248 			return false;
3249 	}
3250 
3251 	for (i = 0; i < size; i++) {
3252 		unsigned long obj_exts;
3253 		struct slabobj_ext *obj_ext;
3254 
3255 		slab = virt_to_slab(p[i]);
3256 
3257 		if (!slab_obj_exts(slab) &&
3258 		    alloc_slab_obj_exts(slab, s, flags, false)) {
3259 			continue;
3260 		}
3261 
3262 		/*
3263 		 * if we fail and size is 1, memcg_alloc_abort_single() will
3264 		 * just free the object, which is ok as we have not assigned
3265 		 * objcg to its obj_ext yet
3266 		 *
3267 		 * for larger sizes, kmem_cache_free_bulk() will uncharge
3268 		 * any objects that were already charged and obj_ext assigned
3269 		 *
3270 		 * TODO: we could batch this until slab_pgdat(slab) changes
3271 		 * between iterations, with a more complicated undo
3272 		 */
3273 		if (obj_cgroup_charge_account(objcg, flags, obj_full_size(s),
3274 					slab_pgdat(slab), cache_vmstat_idx(s)))
3275 			return false;
3276 
3277 		obj_exts = slab_obj_exts(slab);
3278 		get_slab_obj_exts(obj_exts);
3279 		off = obj_to_index(s, slab, p[i]);
3280 		obj_ext = slab_obj_ext(slab, obj_exts, off);
3281 		obj_cgroup_get(objcg);
3282 		obj_ext->objcg = objcg;
3283 		put_slab_obj_exts(obj_exts);
3284 	}
3285 
3286 	return true;
3287 }
3288 
3289 void __memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
3290 			    void **p, int objects, unsigned long obj_exts)
3291 {
3292 	size_t obj_size = obj_full_size(s);
3293 
3294 	for (int i = 0; i < objects; i++) {
3295 		struct obj_cgroup *objcg;
3296 		struct slabobj_ext *obj_ext;
3297 		unsigned int off;
3298 
3299 		off = obj_to_index(s, slab, p[i]);
3300 		obj_ext = slab_obj_ext(slab, obj_exts, off);
3301 		objcg = obj_ext->objcg;
3302 		if (!objcg)
3303 			continue;
3304 
3305 		obj_ext->objcg = NULL;
3306 		refill_obj_stock(objcg, obj_size, true, -obj_size,
3307 				 slab_pgdat(slab), cache_vmstat_idx(s));
3308 		obj_cgroup_put(objcg);
3309 	}
3310 }
3311 
3312 /*
3313  * The objcg is only set on the first page, so transfer it to all the
3314  * other pages.
3315  */
3316 void split_page_memcg(struct page *page, unsigned order)
3317 {
3318 	struct obj_cgroup *objcg = page_objcg(page);
3319 	unsigned int i, nr = 1 << order;
3320 
3321 	if (!objcg)
3322 		return;
3323 
3324 	for (i = 1; i < nr; i++)
3325 		page_set_objcg(&page[i], objcg);
3326 
3327 	obj_cgroup_get_many(objcg, nr - 1);
3328 }
3329 
3330 void folio_split_memcg_refs(struct folio *folio, unsigned old_order,
3331 		unsigned new_order)
3332 {
3333 	unsigned new_refs;
3334 
3335 	if (mem_cgroup_disabled() || !folio_memcg_charged(folio))
3336 		return;
3337 
3338 	new_refs = (1 << (old_order - new_order)) - 1;
3339 	css_get_many(&__folio_memcg(folio)->css, new_refs);
3340 }
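
/*
 * Worked example for the refcount math above (editorial addition): splitting
 * an order-9 folio (512 pages) into order-0 pages leaves
 * (1 << (9 - 0)) - 1 = 511 new folios besides the original, so 511 extra css
 * references are taken on the memcg the pages stay charged to.
 */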
3341 
3342 static int memcg_online_kmem(struct mem_cgroup *memcg)
3343 {
3344 	struct obj_cgroup *objcg;
3345 
3346 	if (mem_cgroup_kmem_disabled())
3347 		return 0;
3348 
3349 	if (unlikely(mem_cgroup_is_root(memcg)))
3350 		return 0;
3351 
3352 	objcg = obj_cgroup_alloc();
3353 	if (!objcg)
3354 		return -ENOMEM;
3355 
3356 	objcg->memcg = memcg;
3357 	rcu_assign_pointer(memcg->objcg, objcg);
3358 	obj_cgroup_get(objcg);
3359 	memcg->orig_objcg = objcg;
3360 
3361 	static_branch_enable(&memcg_kmem_online_key);
3362 
3363 	memcg->kmemcg_id = memcg->id.id;
3364 
3365 	return 0;
3366 }
3367 
3368 static void memcg_offline_kmem(struct mem_cgroup *memcg)
3369 {
3370 	struct mem_cgroup *parent;
3371 
3372 	if (mem_cgroup_kmem_disabled())
3373 		return;
3374 
3375 	if (unlikely(mem_cgroup_is_root(memcg)))
3376 		return;
3377 
3378 	parent = parent_mem_cgroup(memcg);
3379 	if (!parent)
3380 		parent = root_mem_cgroup;
3381 
3382 	memcg_reparent_list_lrus(memcg, parent);
3383 
3384 	/*
3385 	 * Objcg's reparenting must happen after list_lru's; make sure list_lru
3386 	 * helpers won't use the parent's list_lru until the child is drained.
3387 	 */
3388 	memcg_reparent_objcgs(memcg, parent);
3389 }
3390 
3391 #ifdef CONFIG_CGROUP_WRITEBACK
3392 
3393 #include <trace/events/writeback.h>
3394 
3395 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
3396 {
3397 	return wb_domain_init(&memcg->cgwb_domain, gfp);
3398 }
3399 
3400 static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
3401 {
3402 	wb_domain_exit(&memcg->cgwb_domain);
3403 }
3404 
3405 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
3406 {
3407 	wb_domain_size_changed(&memcg->cgwb_domain);
3408 }
3409 
3410 struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
3411 {
3412 	struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
3413 
3414 	if (!memcg->css.parent)
3415 		return NULL;
3416 
3417 	return &memcg->cgwb_domain;
3418 }
3419 
3420 /**
3421  * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg
3422  * @wb: bdi_writeback in question
3423  * @pfilepages: out parameter for number of file pages
3424  * @pheadroom: out parameter for number of allocatable pages according to memcg
3425  * @pdirty: out parameter for number of dirty pages
3426  * @pwriteback: out parameter for number of pages under writeback
3427  *
3428  * Determine the numbers of file, headroom, dirty, and writeback pages in
3429  * @wb's memcg.  File, dirty and writeback are self-explanatory.  Headroom
3430  * is a bit more involved.
3431  *
3432  * A memcg's headroom is "min(max, high) - used".  In the hierarchy, the
3433  * headroom is calculated as the lowest headroom of itself and the
3434  * ancestors.  Note that this doesn't consider the actual amount of
3435  * available memory in the system.  The caller should further cap
3436  * *@pheadroom accordingly.
3437  */
3438 void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
3439 			 unsigned long *pheadroom, unsigned long *pdirty,
3440 			 unsigned long *pwriteback)
3441 {
3442 	struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
3443 	struct mem_cgroup *parent;
3444 
3445 	mem_cgroup_flush_stats_ratelimited(memcg);
3446 
3447 	*pdirty = memcg_page_state(memcg, NR_FILE_DIRTY);
3448 	*pwriteback = memcg_page_state(memcg, NR_WRITEBACK);
3449 	*pfilepages = memcg_page_state(memcg, NR_INACTIVE_FILE) +
3450 			memcg_page_state(memcg, NR_ACTIVE_FILE);
3451 
3452 	*pheadroom = PAGE_COUNTER_MAX;
3453 	while ((parent = parent_mem_cgroup(memcg))) {
3454 		unsigned long ceiling = min(READ_ONCE(memcg->memory.max),
3455 					    READ_ONCE(memcg->memory.high));
3456 		unsigned long used = page_counter_read(&memcg->memory);
3457 
3458 		*pheadroom = min(*pheadroom, ceiling - min(ceiling, used));
3459 		memcg = parent;
3460 	}
3461 }
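
/*
 * Worked example for the headroom calculation above (editorial addition),
 * assuming 4K pages: a memcg with memory.max = 200M, memory.high = 100M and
 * 80M of usage has a local headroom of min(200M, 100M) - 80M = 20M (5120
 * pages); if an ancestor only has 8M of headroom, *pheadroom ends up as 8M,
 * because the walk keeps the minimum across the hierarchy.
 */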
3462 
3463 /*
3464  * Foreign dirty flushing
3465  *
3466  * There's an inherent mismatch between memcg and writeback.  The former
3467  * tracks ownership per-page while the latter per-inode.  This was a
3468  * deliberate design decision because honoring per-page ownership in the
3469  * writeback path is complicated, may lead to higher CPU and IO overheads
3470  * and deemed unnecessary given that write-sharing an inode across
3471  * different cgroups isn't a common use-case.
3472  *
3473  * Combined with inode majority-writer ownership switching, this works well
3474  * enough in most cases but there are some pathological cases.  For
3475  * example, let's say there are two cgroups A and B which keep writing to
3476  * different but confined parts of the same inode.  B owns the inode and
3477  * A's memory is limited far below B's.  A's dirty ratio can rise enough to
3478  * trigger balance_dirty_pages() sleeps but B's can be low enough to avoid
3479  * triggering background writeback.  A will be slowed down without a way to
3480  * make writeback of the dirty pages happen.
3481  *
3482  * Conditions like the above can lead to a cgroup getting repeatedly and
3483  * severely throttled after making some progress after each
3484  * dirty_expire_interval while the underlying IO device is almost
3485  * completely idle.
3486  *
3487  * Solving this problem completely requires matching the ownership tracking
3488  * granularities between memcg and writeback in either direction.  However,
3489  * the more egregious behaviors can be avoided by simply remembering the
3490  * most recent foreign dirtying events and initiating remote flushes on
3491  * them when local writeback isn't enough to keep the memory clean enough.
3492  *
3493  * The following two functions implement such mechanism.  When a foreign
3494  * page - a page whose memcg and writeback ownerships don't match - is
3495  * dirtied, mem_cgroup_track_foreign_dirty() records the inode owning
3496  * bdi_writeback on the page owning memcg.  When balance_dirty_pages()
3497  * decides that the memcg needs to sleep due to high dirty ratio, it calls
3498  * mem_cgroup_flush_foreign() which queues writeback on the recorded
3499  * foreign bdi_writebacks which haven't expired.  Both the numbers of
3500  * recorded bdi_writebacks and concurrent in-flight foreign writebacks are
3501  * limited to MEMCG_CGWB_FRN_CNT.
3502  *
3503  * The mechanism only remembers IDs and doesn't hold any object references.
3504  * As being wrong occasionally doesn't matter, updates and accesses to the
3505  * records are lockless and racy.
3506  */
3507 void mem_cgroup_track_foreign_dirty_slowpath(struct folio *folio,
3508 					     struct bdi_writeback *wb)
3509 {
3510 	struct mem_cgroup *memcg = folio_memcg(folio);
3511 	struct memcg_cgwb_frn *frn;
3512 	u64 now = get_jiffies_64();
3513 	u64 oldest_at = now;
3514 	int oldest = -1;
3515 	int i;
3516 
3517 	trace_track_foreign_dirty(folio, wb);
3518 
3519 	/*
3520 	 * Pick the slot to use.  If there is already a slot for @wb, keep
3521 	 * using it.  If not replace the oldest one which isn't being
3522 	 * written out.
3523 	 */
3524 	for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) {
3525 		frn = &memcg->cgwb_frn[i];
3526 		if (frn->bdi_id == wb->bdi->id &&
3527 		    frn->memcg_id == wb->memcg_css->id)
3528 			break;
3529 		if (time_before64(frn->at, oldest_at) &&
3530 		    atomic_read(&frn->done.cnt) == 1) {
3531 			oldest = i;
3532 			oldest_at = frn->at;
3533 		}
3534 	}
3535 
3536 	if (i < MEMCG_CGWB_FRN_CNT) {
3537 		/*
3538 		 * Re-using an existing one.  Update timestamp lazily to
3539 		 * avoid making the cacheline hot.  We want them to be
3540 		 * reasonably up-to-date and significantly shorter than
3541 		 * dirty_expire_interval as that's what expires the record.
3542 		 * Use the shorter of 1s and dirty_expire_interval / 8.
3543 		 */
3544 		unsigned long update_intv =
3545 			min_t(unsigned long, HZ,
3546 			      msecs_to_jiffies(dirty_expire_interval * 10) / 8);
3547 
3548 		if (time_before64(frn->at, now - update_intv))
3549 			frn->at = now;
3550 	} else if (oldest >= 0) {
3551 		/* replace the oldest free one */
3552 		frn = &memcg->cgwb_frn[oldest];
3553 		frn->bdi_id = wb->bdi->id;
3554 		frn->memcg_id = wb->memcg_css->id;
3555 		frn->at = now;
3556 	}
3557 }
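
/*
 * Worked example for the lazy timestamp update above (editorial addition),
 * assuming the default dirty_expire_interval of 3000 centisecs and HZ == 1000:
 * the candidate interval is msecs_to_jiffies(3000 * 10) / 8 = 30000 ms / 8 =
 * 3750 jiffies, so update_intv is clamped to the shorter value of HZ = 1000
 * jiffies, i.e. the record's timestamp is refreshed at most about once per
 * second.
 */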
3558 
3559 /* issue foreign writeback flushes for recorded foreign dirtying events */
3560 void mem_cgroup_flush_foreign(struct bdi_writeback *wb)
3561 {
3562 	struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
3563 	unsigned long intv = msecs_to_jiffies(dirty_expire_interval * 10);
3564 	u64 now = jiffies_64;
3565 	int i;
3566 
3567 	for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) {
3568 		struct memcg_cgwb_frn *frn = &memcg->cgwb_frn[i];
3569 
3570 		/*
3571 		 * If the record is older than dirty_expire_interval,
3572 		 * writeback on it has already started.  No need to kick it
3573 		 * off again.  Also, don't start a new one if there's
3574 		 * already one in flight.
3575 		 */
3576 		if (time_after64(frn->at, now - intv) &&
3577 		    atomic_read(&frn->done.cnt) == 1) {
3578 			frn->at = 0;
3579 			trace_flush_foreign(wb, frn->bdi_id, frn->memcg_id);
3580 			cgroup_writeback_by_id(frn->bdi_id, frn->memcg_id,
3581 					       WB_REASON_FOREIGN_FLUSH,
3582 					       &frn->done);
3583 		}
3584 	}
3585 }
3586 
3587 #else	/* CONFIG_CGROUP_WRITEBACK */
3588 
3589 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
3590 {
3591 	return 0;
3592 }
3593 
3594 static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
3595 {
3596 }
3597 
3598 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
3599 {
3600 }
3601 
3602 #endif	/* CONFIG_CGROUP_WRITEBACK */
3603 
3604 /*
3605  * Private memory cgroup ID space
3606  *
3607  * Swap-out records and page cache shadow entries need to store memcg
3608  * references in constrained space, so we maintain an ID space that is
3609  * limited to 16 bits (MEM_CGROUP_ID_MAX), limiting the total number of
3610  * memory-controlled cgroups to 64k.
3611  *
3612  * However, there are usually many references to the offline CSS after
3613  * the cgroup has been destroyed, such as page cache or reclaimable
3614  * slab objects, that don't need to hang on to the ID. We want to keep
3615  * those dead CSSes from occupying IDs, or we might quickly exhaust the
3616  * relatively small ID space and prevent the creation of new cgroups
3617  * even when there are far fewer than 64k cgroups - possibly none.
3618  *
3619  * Maintain a private 16-bit ID space for memcg, and allow the ID to
3620  * be freed and recycled when it's no longer needed, which is usually
3621  * when the CSS is offlined.
3622  *
3623  * The only exceptions to that are records of swapped out tmpfs/shmem
3624  * pages that need to be attributed to live ancestors on swapin. But
3625  * those references are manageable from userspace.
3626  */
3627 
3628 #define MEM_CGROUP_ID_MAX	((1UL << MEM_CGROUP_ID_SHIFT) - 1)
3629 static DEFINE_XARRAY_ALLOC1(mem_cgroup_private_ids);
3630 
3631 static void mem_cgroup_private_id_remove(struct mem_cgroup *memcg)
3632 {
3633 	if (memcg->id.id > 0) {
3634 		xa_erase(&mem_cgroup_private_ids, memcg->id.id);
3635 		memcg->id.id = 0;
3636 	}
3637 }
3638 
3639 static inline void mem_cgroup_private_id_put(struct mem_cgroup *memcg, unsigned int n)
3640 {
3641 	if (refcount_sub_and_test(n, &memcg->id.ref)) {
3642 		mem_cgroup_private_id_remove(memcg);
3643 
3644 		/* Memcg ID pins CSS */
3645 		css_put(&memcg->css);
3646 	}
3647 }
3648 
3649 struct mem_cgroup *mem_cgroup_private_id_get_online(struct mem_cgroup *memcg, unsigned int n)
3650 {
3651 	while (!refcount_add_not_zero(n, &memcg->id.ref)) {
3652 		/*
3653 		 * The root cgroup cannot be destroyed, so its refcount must
3654 		 * always be >= 1.
3655 		 */
3656 		if (WARN_ON_ONCE(mem_cgroup_is_root(memcg))) {
3657 			VM_BUG_ON(1);
3658 			break;
3659 		}
3660 		memcg = parent_mem_cgroup(memcg);
3661 		if (!memcg)
3662 			memcg = root_mem_cgroup;
3663 	}
3664 	return memcg;
3665 }
3666 
3667 /**
3668  * mem_cgroup_from_private_id - look up a memcg from a memcg id
3669  * @id: the memcg id to look up
3670  *
3671  * Caller must hold rcu_read_lock().
3672  */
3673 struct mem_cgroup *mem_cgroup_from_private_id(unsigned short id)
3674 {
3675 	WARN_ON_ONCE(!rcu_read_lock_held());
3676 	return xa_load(&mem_cgroup_private_ids, id);
3677 }
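
/*
 * Illustrative lookup sketch (mirrors the swapin charge path further
 * down in this file; not a new API): resolve a swap record's private ID
 * back to a live memcg under RCU, falling back to the mm's memcg when
 * the original owner is gone:
 *
 *	unsigned short id = lookup_swap_cgroup_id(entry);
 *	struct mem_cgroup *memcg;
 *
 *	rcu_read_lock();
 *	memcg = mem_cgroup_from_private_id(id);
 *	if (!memcg || !css_tryget_online(&memcg->css))
 *		memcg = get_mem_cgroup_from_mm(mm);
 *	rcu_read_unlock();
 */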
3678 
3679 struct mem_cgroup *mem_cgroup_get_from_id(u64 id)
3680 {
3681 	struct cgroup *cgrp;
3682 	struct cgroup_subsys_state *css;
3683 	struct mem_cgroup *memcg = NULL;
3684 
3685 	cgrp = cgroup_get_from_id(id);
3686 	if (IS_ERR(cgrp))
3687 		return NULL;
3688 
3689 	css = cgroup_get_e_css(cgrp, &memory_cgrp_subsys);
3690 	if (css)
3691 		memcg = container_of(css, struct mem_cgroup, css);
3692 
3693 	cgroup_put(cgrp);
3694 
3695 	return memcg;
3696 }
3697 
3698 static void free_mem_cgroup_per_node_info(struct mem_cgroup_per_node *pn)
3699 {
3700 	if (!pn)
3701 		return;
3702 
3703 	free_percpu(pn->lruvec_stats_percpu);
3704 	kfree(pn->lruvec_stats);
3705 	kfree(pn);
3706 }
3707 
3708 static bool alloc_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
3709 {
3710 	struct mem_cgroup_per_node *pn;
3711 
3712 	pn = kmem_cache_alloc_node(memcg_pn_cachep, GFP_KERNEL | __GFP_ZERO,
3713 				   node);
3714 	if (!pn)
3715 		return false;
3716 
3717 	pn->lruvec_stats = kzalloc_node(sizeof(struct lruvec_stats),
3718 					GFP_KERNEL_ACCOUNT, node);
3719 	if (!pn->lruvec_stats)
3720 		goto fail;
3721 
3722 	pn->lruvec_stats_percpu = alloc_percpu_gfp(struct lruvec_stats_percpu,
3723 						   GFP_KERNEL_ACCOUNT);
3724 	if (!pn->lruvec_stats_percpu)
3725 		goto fail;
3726 
3727 	lruvec_init(&pn->lruvec);
3728 	pn->memcg = memcg;
3729 
3730 	memcg->nodeinfo[node] = pn;
3731 	return true;
3732 fail:
3733 	free_mem_cgroup_per_node_info(pn);
3734 	return false;
3735 }
3736 
3737 static void __mem_cgroup_free(struct mem_cgroup *memcg)
3738 {
3739 	int node;
3740 
3741 	obj_cgroup_put(memcg->orig_objcg);
3742 
3743 	for_each_node(node)
3744 		free_mem_cgroup_per_node_info(memcg->nodeinfo[node]);
3745 	memcg1_free_events(memcg);
3746 	kfree(memcg->vmstats);
3747 	free_percpu(memcg->vmstats_percpu);
3748 	kfree(memcg);
3749 }
3750 
3751 static void mem_cgroup_free(struct mem_cgroup *memcg)
3752 {
3753 	lru_gen_exit_memcg(memcg);
3754 	memcg_wb_domain_exit(memcg);
3755 	__mem_cgroup_free(memcg);
3756 }
3757 
3758 static struct mem_cgroup *mem_cgroup_alloc(struct mem_cgroup *parent)
3759 {
3760 	struct memcg_vmstats_percpu *statc;
3761 	struct memcg_vmstats_percpu __percpu *pstatc_pcpu;
3762 	struct mem_cgroup *memcg;
3763 	int node, cpu;
3764 	int __maybe_unused i;
3765 	long error;
3766 
3767 	memcg = kmem_cache_zalloc(memcg_cachep, GFP_KERNEL);
3768 	if (!memcg)
3769 		return ERR_PTR(-ENOMEM);
3770 
3771 	error = xa_alloc(&mem_cgroup_private_ids, &memcg->id.id, NULL,
3772 			 XA_LIMIT(1, MEM_CGROUP_ID_MAX), GFP_KERNEL);
3773 	if (error)
3774 		goto fail;
3775 	error = -ENOMEM;
3776 
3777 	memcg->vmstats = kzalloc_obj(struct memcg_vmstats, GFP_KERNEL_ACCOUNT);
3778 	if (!memcg->vmstats)
3779 		goto fail;
3780 
3781 	memcg->vmstats_percpu = alloc_percpu_gfp(struct memcg_vmstats_percpu,
3782 						 GFP_KERNEL_ACCOUNT);
3783 	if (!memcg->vmstats_percpu)
3784 		goto fail;
3785 
3786 	if (!memcg1_alloc_events(memcg))
3787 		goto fail;
3788 
3789 	for_each_possible_cpu(cpu) {
3790 		if (parent)
3791 			pstatc_pcpu = parent->vmstats_percpu;
3792 		statc = per_cpu_ptr(memcg->vmstats_percpu, cpu);
3793 		statc->parent_pcpu = parent ? pstatc_pcpu : NULL;
3794 		statc->vmstats = memcg->vmstats;
3795 	}
3796 
3797 	for_each_node(node)
3798 		if (!alloc_mem_cgroup_per_node_info(memcg, node))
3799 			goto fail;
3800 
3801 	if (memcg_wb_domain_init(memcg, GFP_KERNEL))
3802 		goto fail;
3803 
3804 	INIT_WORK(&memcg->high_work, high_work_func);
3805 	vmpressure_init(&memcg->vmpressure);
3806 	INIT_LIST_HEAD(&memcg->memory_peaks);
3807 	INIT_LIST_HEAD(&memcg->swap_peaks);
3808 	spin_lock_init(&memcg->peaks_lock);
3809 	memcg->socket_pressure = get_jiffies_64();
3810 #if BITS_PER_LONG < 64
3811 	seqlock_init(&memcg->socket_pressure_seqlock);
3812 #endif
3813 	memcg1_memcg_init(memcg);
3814 	memcg->kmemcg_id = -1;
3815 	INIT_LIST_HEAD(&memcg->objcg_list);
3816 #ifdef CONFIG_CGROUP_WRITEBACK
3817 	INIT_LIST_HEAD(&memcg->cgwb_list);
3818 	for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++)
3819 		memcg->cgwb_frn[i].done =
3820 			__WB_COMPLETION_INIT(&memcg_cgwb_frn_waitq);
3821 #endif
3822 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
3823 	spin_lock_init(&memcg->deferred_split_queue.split_queue_lock);
3824 	INIT_LIST_HEAD(&memcg->deferred_split_queue.split_queue);
3825 	memcg->deferred_split_queue.split_queue_len = 0;
3826 #endif
3827 	lru_gen_init_memcg(memcg);
3828 	return memcg;
3829 fail:
3830 	mem_cgroup_private_id_remove(memcg);
3831 	__mem_cgroup_free(memcg);
3832 	return ERR_PTR(error);
3833 }
3834 
3835 static struct cgroup_subsys_state * __ref
3836 mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
3837 {
3838 	struct mem_cgroup *parent = mem_cgroup_from_css(parent_css);
3839 	struct mem_cgroup *memcg, *old_memcg;
3840 	bool memcg_on_dfl = cgroup_subsys_on_dfl(memory_cgrp_subsys);
3841 
3842 	old_memcg = set_active_memcg(parent);
3843 	memcg = mem_cgroup_alloc(parent);
3844 	set_active_memcg(old_memcg);
3845 	if (IS_ERR(memcg))
3846 		return ERR_CAST(memcg);
3847 
3848 	page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX);
3849 	memcg1_soft_limit_reset(memcg);
3850 #ifdef CONFIG_ZSWAP
3851 	memcg->zswap_max = PAGE_COUNTER_MAX;
3852 	WRITE_ONCE(memcg->zswap_writeback, true);
3853 #endif
3854 	page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX);
3855 	if (parent) {
3856 		WRITE_ONCE(memcg->swappiness, mem_cgroup_swappiness(parent));
3857 
3858 		page_counter_init(&memcg->memory, &parent->memory, memcg_on_dfl);
3859 		page_counter_init(&memcg->swap, &parent->swap, false);
3860 #ifdef CONFIG_MEMCG_V1
3861 		memcg->memory.track_failcnt = !memcg_on_dfl;
3862 		WRITE_ONCE(memcg->oom_kill_disable, READ_ONCE(parent->oom_kill_disable));
3863 		page_counter_init(&memcg->kmem, &parent->kmem, false);
3864 		page_counter_init(&memcg->tcpmem, &parent->tcpmem, false);
3865 #endif
3866 	} else {
3867 		init_memcg_stats();
3868 		init_memcg_events();
3869 		page_counter_init(&memcg->memory, NULL, true);
3870 		page_counter_init(&memcg->swap, NULL, false);
3871 #ifdef CONFIG_MEMCG_V1
3872 		page_counter_init(&memcg->kmem, NULL, false);
3873 		page_counter_init(&memcg->tcpmem, NULL, false);
3874 #endif
3875 		root_mem_cgroup = memcg;
3876 		return &memcg->css;
3877 	}
3878 
3879 	if (memcg_on_dfl && !cgroup_memory_nosocket)
3880 		static_branch_inc(&memcg_sockets_enabled_key);
3881 
3882 	if (!cgroup_memory_nobpf)
3883 		static_branch_inc(&memcg_bpf_enabled_key);
3884 
3885 	return &memcg->css;
3886 }
3887 
3888 static int mem_cgroup_css_online(struct cgroup_subsys_state *css)
3889 {
3890 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3891 
3892 	if (memcg_online_kmem(memcg))
3893 		goto remove_id;
3894 
3895 	/*
3896 	 * A memcg must be visible to expand_shrinker_info()
3897 	 * by the time the maps are allocated. So allocate the maps
3898 	 * here, once for_each_mem_cgroup() can no longer skip this memcg.
3899 	 */
3900 	if (alloc_shrinker_info(memcg))
3901 		goto offline_kmem;
3902 
3903 	if (unlikely(mem_cgroup_is_root(memcg)) && !mem_cgroup_disabled())
3904 		queue_delayed_work(system_dfl_wq, &stats_flush_dwork,
3905 				   FLUSH_TIME);
3906 	lru_gen_online_memcg(memcg);
3907 
3908 	/* Online state pins memcg ID, memcg ID pins CSS */
3909 	refcount_set(&memcg->id.ref, 1);
3910 	css_get(css);
3911 
3912 	/*
3913 	 * Ensure mem_cgroup_from_private_id() works once we're fully online.
3914 	 *
3915 	 * We could do this earlier and require callers to filter with
3916 	 * css_tryget_online(). But right now there are no users that
3917 	 * need earlier access, and the workingset code relies on the
3918 	 * cgroup tree linkage (mem_cgroup_get_nr_swap_pages()). So
3919 	 * publish it here at the end of onlining. This matches the
3920 	 * regular ID destruction during offlining.
3921 	 */
3922 	xa_store(&mem_cgroup_private_ids, memcg->id.id, memcg, GFP_KERNEL);
3923 
3924 	return 0;
3925 offline_kmem:
3926 	memcg_offline_kmem(memcg);
3927 remove_id:
3928 	mem_cgroup_private_id_remove(memcg);
3929 	return -ENOMEM;
3930 }
3931 
3932 static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
3933 {
3934 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3935 
3936 	memcg1_css_offline(memcg);
3937 
3938 	page_counter_set_min(&memcg->memory, 0);
3939 	page_counter_set_low(&memcg->memory, 0);
3940 
3941 	zswap_memcg_offline_cleanup(memcg);
3942 
3943 	memcg_offline_kmem(memcg);
3944 	reparent_deferred_split_queue(memcg);
3945 	reparent_shrinker_deferred(memcg);
3946 	wb_memcg_offline(memcg);
3947 	lru_gen_offline_memcg(memcg);
3948 
3949 	drain_all_stock(memcg);
3950 
3951 	mem_cgroup_private_id_put(memcg, 1);
3952 }
3953 
3954 static void mem_cgroup_css_released(struct cgroup_subsys_state *css)
3955 {
3956 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3957 
3958 	invalidate_reclaim_iterators(memcg);
3959 	lru_gen_release_memcg(memcg);
3960 }
3961 
3962 static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
3963 {
3964 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3965 	int __maybe_unused i;
3966 
3967 #ifdef CONFIG_CGROUP_WRITEBACK
3968 	for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++)
3969 		wb_wait_for_completion(&memcg->cgwb_frn[i].done);
3970 #endif
3971 	if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
3972 		static_branch_dec(&memcg_sockets_enabled_key);
3973 
3974 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg1_tcpmem_active(memcg))
3975 		static_branch_dec(&memcg_sockets_enabled_key);
3976 
3977 	if (!cgroup_memory_nobpf)
3978 		static_branch_dec(&memcg_bpf_enabled_key);
3979 
3980 	vmpressure_cleanup(&memcg->vmpressure);
3981 	cancel_work_sync(&memcg->high_work);
3982 	memcg1_remove_from_trees(memcg);
3983 	free_shrinker_info(memcg);
3984 	mem_cgroup_free(memcg);
3985 }
3986 
3987 /**
3988  * mem_cgroup_css_reset - reset the states of a mem_cgroup
3989  * @css: the target css
3990  *
3991  * Reset the states of the mem_cgroup associated with @css.  This is
3992  * invoked when the userland requests disabling on the default hierarchy
3993  * but the memcg is pinned through dependency.  The memcg should stop
3994  * applying policies and should revert to the vanilla state as it may be
3995  * made visible again.
3996  *
3997  * The current implementation only resets the essential configurations.
3998  * This needs to be expanded to cover all the visible parts.
3999  */
4000 static void mem_cgroup_css_reset(struct cgroup_subsys_state *css)
4001 {
4002 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4003 
4004 	page_counter_set_max(&memcg->memory, PAGE_COUNTER_MAX);
4005 	page_counter_set_max(&memcg->swap, PAGE_COUNTER_MAX);
4006 #ifdef CONFIG_MEMCG_V1
4007 	page_counter_set_max(&memcg->kmem, PAGE_COUNTER_MAX);
4008 	page_counter_set_max(&memcg->tcpmem, PAGE_COUNTER_MAX);
4009 #endif
4010 	page_counter_set_min(&memcg->memory, 0);
4011 	page_counter_set_low(&memcg->memory, 0);
4012 	page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX);
4013 	memcg1_soft_limit_reset(memcg);
4014 	page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX);
4015 	memcg_wb_domain_size_changed(memcg);
4016 }
4017 
4018 struct aggregate_control {
4019 	/* pointer to the aggregated (CPU and subtree aggregated) counters */
4020 	long *aggregate;
4021 	/* pointer to the non-hierarchical (CPU aggregated) counters */
4022 	long *local;
4023 	/* pointer to the pending child counters during tree propagation */
4024 	long *pending;
4025 	/* pointer to the parent's pending counters, could be NULL */
4026 	long *ppending;
4027 	/* pointer to the percpu counters to be aggregated */
4028 	long *cstat;
4029 	/* pointer to the percpu counters of the last aggregation */
4030 	long *cstat_prev;
4031 	/* size of the above counters */
4032 	int size;
4033 };
4034 
4035 static void mem_cgroup_stat_aggregate(struct aggregate_control *ac)
4036 {
4037 	int i;
4038 	long delta, delta_cpu, v;
4039 
4040 	for (i = 0; i < ac->size; i++) {
4041 		/*
4042 		 * Collect the aggregated propagation counts of groups
4043 		 * below us. We're in a per-cpu loop here and this is
4044 		 * a global counter, so the first cycle will get them.
4045 		 */
4046 		delta = ac->pending[i];
4047 		if (delta)
4048 			ac->pending[i] = 0;
4049 
4050 		/* Add CPU changes on this level since the last flush */
4051 		delta_cpu = 0;
4052 		v = READ_ONCE(ac->cstat[i]);
4053 		if (v != ac->cstat_prev[i]) {
4054 			delta_cpu = v - ac->cstat_prev[i];
4055 			delta += delta_cpu;
4056 			ac->cstat_prev[i] = v;
4057 		}
4058 
4059 		/* Aggregate counts on this level and propagate upwards */
4060 		if (delta_cpu)
4061 			ac->local[i] += delta_cpu;
4062 
4063 		if (delta) {
4064 			ac->aggregate[i] += delta;
4065 			if (ac->ppending)
4066 				ac->ppending[i] += delta;
4067 		}
4068 	}
4069 }
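
/*
 * Worked example with illustrative numbers: suppose counter i on this
 * CPU moved from 5 to 8 since the last flush and a child previously
 * deposited 2 into this memcg's ->pending[i].  Then delta_cpu = 3 is
 * added to ->local[i], and delta = 2 + 3 = 5 is added to ->aggregate[i]
 * and, if a parent exists, deposited into ->ppending[i] so the parent
 * picks it up on its own flush.
 */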
4070 
4071 #ifdef CONFIG_MEMCG_NMI_SAFETY_REQUIRES_ATOMIC
4072 static void flush_nmi_stats(struct mem_cgroup *memcg, struct mem_cgroup *parent,
4073 			    int cpu)
4074 {
4075 	int nid;
4076 
4077 	if (atomic_read(&memcg->kmem_stat)) {
4078 		int kmem = atomic_xchg(&memcg->kmem_stat, 0);
4079 		int index = memcg_stats_index(MEMCG_KMEM);
4080 
4081 		memcg->vmstats->state[index] += kmem;
4082 		if (parent)
4083 			parent->vmstats->state_pending[index] += kmem;
4084 	}
4085 
4086 	for_each_node_state(nid, N_MEMORY) {
4087 		struct mem_cgroup_per_node *pn = memcg->nodeinfo[nid];
4088 		struct lruvec_stats *lstats = pn->lruvec_stats;
4089 		struct lruvec_stats *plstats = NULL;
4090 
4091 		if (parent)
4092 			plstats = parent->nodeinfo[nid]->lruvec_stats;
4093 
4094 		if (atomic_read(&pn->slab_reclaimable)) {
4095 			int slab = atomic_xchg(&pn->slab_reclaimable, 0);
4096 			int index = memcg_stats_index(NR_SLAB_RECLAIMABLE_B);
4097 
4098 			lstats->state[index] += slab;
4099 			if (plstats)
4100 				plstats->state_pending[index] += slab;
4101 		}
4102 		if (atomic_read(&pn->slab_unreclaimable)) {
4103 			int slab = atomic_xchg(&pn->slab_unreclaimable, 0);
4104 			int index = memcg_stats_index(NR_SLAB_UNRECLAIMABLE_B);
4105 
4106 			lstats->state[index] += slab;
4107 			if (plstats)
4108 				plstats->state_pending[index] += slab;
4109 		}
4110 	}
4111 }
4112 #else
4113 static void flush_nmi_stats(struct mem_cgroup *memcg, struct mem_cgroup *parent,
4114 			    int cpu)
4115 {}
4116 #endif
4117 
4118 static void mem_cgroup_css_rstat_flush(struct cgroup_subsys_state *css, int cpu)
4119 {
4120 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4121 	struct mem_cgroup *parent = parent_mem_cgroup(memcg);
4122 	struct memcg_vmstats_percpu *statc;
4123 	struct aggregate_control ac;
4124 	int nid;
4125 
4126 	flush_nmi_stats(memcg, parent, cpu);
4127 
4128 	statc = per_cpu_ptr(memcg->vmstats_percpu, cpu);
4129 
4130 	ac = (struct aggregate_control) {
4131 		.aggregate = memcg->vmstats->state,
4132 		.local = memcg->vmstats->state_local,
4133 		.pending = memcg->vmstats->state_pending,
4134 		.ppending = parent ? parent->vmstats->state_pending : NULL,
4135 		.cstat = statc->state,
4136 		.cstat_prev = statc->state_prev,
4137 		.size = MEMCG_VMSTAT_SIZE,
4138 	};
4139 	mem_cgroup_stat_aggregate(&ac);
4140 
4141 	ac = (struct aggregate_control) {
4142 		.aggregate = memcg->vmstats->events,
4143 		.local = memcg->vmstats->events_local,
4144 		.pending = memcg->vmstats->events_pending,
4145 		.ppending = parent ? parent->vmstats->events_pending : NULL,
4146 		.cstat = statc->events,
4147 		.cstat_prev = statc->events_prev,
4148 		.size = NR_MEMCG_EVENTS,
4149 	};
4150 	mem_cgroup_stat_aggregate(&ac);
4151 
4152 	for_each_node_state(nid, N_MEMORY) {
4153 		struct mem_cgroup_per_node *pn = memcg->nodeinfo[nid];
4154 		struct lruvec_stats *lstats = pn->lruvec_stats;
4155 		struct lruvec_stats *plstats = NULL;
4156 		struct lruvec_stats_percpu *lstatc;
4157 
4158 		if (parent)
4159 			plstats = parent->nodeinfo[nid]->lruvec_stats;
4160 
4161 		lstatc = per_cpu_ptr(pn->lruvec_stats_percpu, cpu);
4162 
4163 		ac = (struct aggregate_control) {
4164 			.aggregate = lstats->state,
4165 			.local = lstats->state_local,
4166 			.pending = lstats->state_pending,
4167 			.ppending = plstats ? plstats->state_pending : NULL,
4168 			.cstat = lstatc->state,
4169 			.cstat_prev = lstatc->state_prev,
4170 			.size = NR_MEMCG_NODE_STAT_ITEMS,
4171 		};
4172 		mem_cgroup_stat_aggregate(&ac);
4173 
4174 	}
4175 	WRITE_ONCE(statc->stats_updates, 0);
4176 	/* We are in a per-cpu loop here; only do the atomic write once */
4177 	if (atomic_read(&memcg->vmstats->stats_updates))
4178 		atomic_set(&memcg->vmstats->stats_updates, 0);
4179 }
4180 
4181 static void mem_cgroup_fork(struct task_struct *task)
4182 {
4183 	/*
4184 	 * Set the update flag to cause task->objcg to be initialized lazily
4185 	 * on the first allocation. It can be done without any synchronization
4186 	 * because it's always performed on the current task, as is
4187 	 * current_objcg_update().
4188 	 */
4189 	task->objcg = (struct obj_cgroup *)CURRENT_OBJCG_UPDATE_FLAG;
4190 }
4191 
4192 static void mem_cgroup_exit(struct task_struct *task)
4193 {
4194 	struct obj_cgroup *objcg = task->objcg;
4195 
4196 	objcg = (struct obj_cgroup *)
4197 		((unsigned long)objcg & ~CURRENT_OBJCG_UPDATE_FLAG);
4198 	obj_cgroup_put(objcg);
4199 
4200 	/*
4201 	 * Some kernel allocations can happen after this point,
4202 	 * but let's ignore them. It can be done without any synchronization
4203 	 * because it's always performed on the current task, as is
4204 	 * current_objcg_update().
4205 	 */
4206 	task->objcg = NULL;
4207 }
4208 
4209 #ifdef CONFIG_LRU_GEN
4210 static void mem_cgroup_lru_gen_attach(struct cgroup_taskset *tset)
4211 {
4212 	struct task_struct *task;
4213 	struct cgroup_subsys_state *css;
4214 
4215 	/* find the first leader if there is any */
4216 	cgroup_taskset_for_each_leader(task, css, tset)
4217 		break;
4218 
4219 	if (!task)
4220 		return;
4221 
4222 	task_lock(task);
4223 	if (task->mm && READ_ONCE(task->mm->owner) == task)
4224 		lru_gen_migrate_mm(task->mm);
4225 	task_unlock(task);
4226 }
4227 #else
4228 static void mem_cgroup_lru_gen_attach(struct cgroup_taskset *tset) {}
4229 #endif /* CONFIG_LRU_GEN */
4230 
4231 static void mem_cgroup_kmem_attach(struct cgroup_taskset *tset)
4232 {
4233 	struct task_struct *task;
4234 	struct cgroup_subsys_state *css;
4235 
4236 	cgroup_taskset_for_each(task, css, tset) {
4237 		/* atomically set the update bit */
4238 		set_bit(CURRENT_OBJCG_UPDATE_BIT, (unsigned long *)&task->objcg);
4239 	}
4240 }
4241 
4242 static void mem_cgroup_attach(struct cgroup_taskset *tset)
4243 {
4244 	mem_cgroup_lru_gen_attach(tset);
4245 	mem_cgroup_kmem_attach(tset);
4246 }
4247 
4248 static int seq_puts_memcg_tunable(struct seq_file *m, unsigned long value)
4249 {
4250 	if (value == PAGE_COUNTER_MAX)
4251 		seq_puts(m, "max\n");
4252 	else
4253 		seq_printf(m, "%llu\n", (u64)value * PAGE_SIZE);
4254 
4255 	return 0;
4256 }
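
/*
 * Example output (illustrative): a value of PAGE_COUNTER_MAX prints
 * "max"; a value of 25600 pages prints "104857600" (25600 * 4096) on a
 * 4 KiB page size system, i.e. tunables are reported in bytes.
 */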
4257 
4258 static u64 memory_current_read(struct cgroup_subsys_state *css,
4259 			       struct cftype *cft)
4260 {
4261 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4262 
4263 	return (u64)page_counter_read(&memcg->memory) * PAGE_SIZE;
4264 }
4265 
4266 #define OFP_PEAK_UNSET (((-1UL)))
4267 
4268 static int peak_show(struct seq_file *sf, void *v, struct page_counter *pc)
4269 {
4270 	struct cgroup_of_peak *ofp = of_peak(sf->private);
4271 	u64 fd_peak = READ_ONCE(ofp->value), peak;
4272 
4273 	/* User wants global or local peak? */
4274 	if (fd_peak == OFP_PEAK_UNSET)
4275 		peak = pc->watermark;
4276 	else
4277 		peak = max(fd_peak, READ_ONCE(pc->local_watermark));
4278 
4279 	seq_printf(sf, "%llu\n", peak * PAGE_SIZE);
4280 	return 0;
4281 }
4282 
4283 static int memory_peak_show(struct seq_file *sf, void *v)
4284 {
4285 	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(sf));
4286 
4287 	return peak_show(sf, v, &memcg->memory);
4288 }
4289 
4290 static int peak_open(struct kernfs_open_file *of)
4291 {
4292 	struct cgroup_of_peak *ofp = of_peak(of);
4293 
4294 	ofp->value = OFP_PEAK_UNSET;
4295 	return 0;
4296 }
4297 
4298 static void peak_release(struct kernfs_open_file *of)
4299 {
4300 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4301 	struct cgroup_of_peak *ofp = of_peak(of);
4302 
4303 	if (ofp->value == OFP_PEAK_UNSET) {
4304 		/* fast path (no writes on this fd) */
4305 		return;
4306 	}
4307 	spin_lock(&memcg->peaks_lock);
4308 	list_del(&ofp->list);
4309 	spin_unlock(&memcg->peaks_lock);
4310 }
4311 
4312 static ssize_t peak_write(struct kernfs_open_file *of, char *buf, size_t nbytes,
4313 			  loff_t off, struct page_counter *pc,
4314 			  struct list_head *watchers)
4315 {
4316 	unsigned long usage;
4317 	struct cgroup_of_peak *peer_ctx;
4318 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4319 	struct cgroup_of_peak *ofp = of_peak(of);
4320 
4321 	spin_lock(&memcg->peaks_lock);
4322 
4323 	usage = page_counter_read(pc);
4324 	WRITE_ONCE(pc->local_watermark, usage);
4325 
4326 	list_for_each_entry(peer_ctx, watchers, list)
4327 		if (usage > peer_ctx->value)
4328 			WRITE_ONCE(peer_ctx->value, usage);
4329 
4330 	/* initial write, register watcher */
4331 	if (ofp->value == OFP_PEAK_UNSET)
4332 		list_add(&ofp->list, watchers);
4333 
4334 	WRITE_ONCE(ofp->value, usage);
4335 	spin_unlock(&memcg->peaks_lock);
4336 
4337 	return nbytes;
4338 }
4339 
4340 static ssize_t memory_peak_write(struct kernfs_open_file *of, char *buf,
4341 				 size_t nbytes, loff_t off)
4342 {
4343 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4344 
4345 	return peak_write(of, buf, nbytes, off, &memcg->memory,
4346 			  &memcg->memory_peaks);
4347 }
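
/*
 * Illustrative userspace usage (a sketch, not kernel code; "foo" is a
 * placeholder cgroup): each open file description of memory.peak tracks
 * its own local peak once it has been written to, e.g.
 *
 *	int fd = open("/sys/fs/cgroup/foo/memory.peak", O_RDWR);
 *	read(fd, buf, sizeof(buf));	// global high watermark
 *	write(fd, "reset\n", 6);	// any write resets this fd's peak
 *	lseek(fd, 0, SEEK_SET);
 *	read(fd, buf, sizeof(buf));	// peak usage since the write above
 */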
4348 
4349 #undef OFP_PEAK_UNSET
4350 
4351 static int memory_min_show(struct seq_file *m, void *v)
4352 {
4353 	return seq_puts_memcg_tunable(m,
4354 		READ_ONCE(mem_cgroup_from_seq(m)->memory.min));
4355 }
4356 
4357 static ssize_t memory_min_write(struct kernfs_open_file *of,
4358 				char *buf, size_t nbytes, loff_t off)
4359 {
4360 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4361 	unsigned long min;
4362 	int err;
4363 
4364 	buf = strstrip(buf);
4365 	err = page_counter_memparse(buf, "max", &min);
4366 	if (err)
4367 		return err;
4368 
4369 	page_counter_set_min(&memcg->memory, min);
4370 
4371 	return nbytes;
4372 }
4373 
4374 static int memory_low_show(struct seq_file *m, void *v)
4375 {
4376 	return seq_puts_memcg_tunable(m,
4377 		READ_ONCE(mem_cgroup_from_seq(m)->memory.low));
4378 }
4379 
4380 static ssize_t memory_low_write(struct kernfs_open_file *of,
4381 				char *buf, size_t nbytes, loff_t off)
4382 {
4383 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4384 	unsigned long low;
4385 	int err;
4386 
4387 	buf = strstrip(buf);
4388 	err = page_counter_memparse(buf, "max", &low);
4389 	if (err)
4390 		return err;
4391 
4392 	page_counter_set_low(&memcg->memory, low);
4393 
4394 	return nbytes;
4395 }
4396 
4397 static int memory_high_show(struct seq_file *m, void *v)
4398 {
4399 	return seq_puts_memcg_tunable(m,
4400 		READ_ONCE(mem_cgroup_from_seq(m)->memory.high));
4401 }
4402 
4403 static ssize_t memory_high_write(struct kernfs_open_file *of,
4404 				 char *buf, size_t nbytes, loff_t off)
4405 {
4406 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4407 	unsigned int nr_retries = MAX_RECLAIM_RETRIES;
4408 	bool drained = false;
4409 	unsigned long high;
4410 	int err;
4411 
4412 	buf = strstrip(buf);
4413 	err = page_counter_memparse(buf, "max", &high);
4414 	if (err)
4415 		return err;
4416 
4417 	page_counter_set_high(&memcg->memory, high);
4418 
4419 	if (of->file->f_flags & O_NONBLOCK)
4420 		goto out;
4421 
4422 	for (;;) {
4423 		unsigned long nr_pages = page_counter_read(&memcg->memory);
4424 		unsigned long reclaimed;
4425 
4426 		if (nr_pages <= high)
4427 			break;
4428 
4429 		if (signal_pending(current))
4430 			break;
4431 
4432 		if (!drained) {
4433 			drain_all_stock(memcg);
4434 			drained = true;
4435 			continue;
4436 		}
4437 
4438 		reclaimed = try_to_free_mem_cgroup_pages(memcg, nr_pages - high,
4439 					GFP_KERNEL, MEMCG_RECLAIM_MAY_SWAP, NULL);
4440 
4441 		if (!reclaimed && !nr_retries--)
4442 			break;
4443 	}
4444 out:
4445 	memcg_wb_domain_size_changed(memcg);
4446 	return nbytes;
4447 }
4448 
4449 static int memory_max_show(struct seq_file *m, void *v)
4450 {
4451 	return seq_puts_memcg_tunable(m,
4452 		READ_ONCE(mem_cgroup_from_seq(m)->memory.max));
4453 }
4454 
4455 static ssize_t memory_max_write(struct kernfs_open_file *of,
4456 				char *buf, size_t nbytes, loff_t off)
4457 {
4458 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4459 	unsigned int nr_reclaims = MAX_RECLAIM_RETRIES;
4460 	bool drained = false;
4461 	unsigned long max;
4462 	int err;
4463 
4464 	buf = strstrip(buf);
4465 	err = page_counter_memparse(buf, "max", &max);
4466 	if (err)
4467 		return err;
4468 
4469 	xchg(&memcg->memory.max, max);
4470 
4471 	if (of->file->f_flags & O_NONBLOCK)
4472 		goto out;
4473 
4474 	for (;;) {
4475 		unsigned long nr_pages = page_counter_read(&memcg->memory);
4476 
4477 		if (nr_pages <= max)
4478 			break;
4479 
4480 		if (signal_pending(current))
4481 			break;
4482 
4483 		if (!drained) {
4484 			drain_all_stock(memcg);
4485 			drained = true;
4486 			continue;
4487 		}
4488 
4489 		if (nr_reclaims) {
4490 			if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max,
4491 					GFP_KERNEL, MEMCG_RECLAIM_MAY_SWAP, NULL))
4492 				nr_reclaims--;
4493 			continue;
4494 		}
4495 
4496 		memcg_memory_event(memcg, MEMCG_OOM);
4497 		if (!mem_cgroup_out_of_memory(memcg, GFP_KERNEL, 0))
4498 			break;
4499 		cond_resched();
4500 	}
4501 out:
4502 	memcg_wb_domain_size_changed(memcg);
4503 	return nbytes;
4504 }
4505 
4506 /*
4507  * Note: don't forget to update the 'samples/cgroup/memcg_event_listener'
4508  * if any new events become available.
4509  */
4510 static void __memory_events_show(struct seq_file *m, atomic_long_t *events)
4511 {
4512 	seq_printf(m, "low %lu\n", atomic_long_read(&events[MEMCG_LOW]));
4513 	seq_printf(m, "high %lu\n", atomic_long_read(&events[MEMCG_HIGH]));
4514 	seq_printf(m, "max %lu\n", atomic_long_read(&events[MEMCG_MAX]));
4515 	seq_printf(m, "oom %lu\n", atomic_long_read(&events[MEMCG_OOM]));
4516 	seq_printf(m, "oom_kill %lu\n",
4517 		   atomic_long_read(&events[MEMCG_OOM_KILL]));
4518 	seq_printf(m, "oom_group_kill %lu\n",
4519 		   atomic_long_read(&events[MEMCG_OOM_GROUP_KILL]));
4520 	seq_printf(m, "sock_throttled %lu\n",
4521 		   atomic_long_read(&events[MEMCG_SOCK_THROTTLED]));
4522 }
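
/*
 * Example memory.events content as produced by the helper above
 * (values illustrative):
 *
 *	low 0
 *	high 12
 *	max 3
 *	oom 1
 *	oom_kill 1
 *	oom_group_kill 0
 *	sock_throttled 0
 */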
4523 
4524 static int memory_events_show(struct seq_file *m, void *v)
4525 {
4526 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
4527 
4528 	__memory_events_show(m, memcg->memory_events);
4529 	return 0;
4530 }
4531 
4532 static int memory_events_local_show(struct seq_file *m, void *v)
4533 {
4534 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
4535 
4536 	__memory_events_show(m, memcg->memory_events_local);
4537 	return 0;
4538 }
4539 
4540 int memory_stat_show(struct seq_file *m, void *v)
4541 {
4542 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
4543 	char *buf = kmalloc(SEQ_BUF_SIZE, GFP_KERNEL);
4544 	struct seq_buf s;
4545 
4546 	if (!buf)
4547 		return -ENOMEM;
4548 	seq_buf_init(&s, buf, SEQ_BUF_SIZE);
4549 	memory_stat_format(memcg, &s);
4550 	seq_puts(m, buf);
4551 	kfree(buf);
4552 	return 0;
4553 }
4554 
4555 #ifdef CONFIG_NUMA
4556 static inline unsigned long lruvec_page_state_output(struct lruvec *lruvec,
4557 						     int item)
4558 {
4559 	return lruvec_page_state(lruvec, item) *
4560 		memcg_page_state_output_unit(item);
4561 }
4562 
4563 static int memory_numa_stat_show(struct seq_file *m, void *v)
4564 {
4565 	int i;
4566 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
4567 
4568 	mem_cgroup_flush_stats(memcg);
4569 
4570 	for (i = 0; i < ARRAY_SIZE(memory_stats); i++) {
4571 		int nid;
4572 
4573 		if (memory_stats[i].idx >= NR_VM_NODE_STAT_ITEMS)
4574 			continue;
4575 
4576 		seq_printf(m, "%s", memory_stats[i].name);
4577 		for_each_node_state(nid, N_MEMORY) {
4578 			u64 size;
4579 			struct lruvec *lruvec;
4580 
4581 			lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
4582 			size = lruvec_page_state_output(lruvec,
4583 							memory_stats[i].idx);
4584 			seq_printf(m, " N%d=%llu", nid, size);
4585 		}
4586 		seq_putc(m, '\n');
4587 	}
4588 
4589 	return 0;
4590 }
4591 #endif
4592 
4593 static int memory_oom_group_show(struct seq_file *m, void *v)
4594 {
4595 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
4596 
4597 	seq_printf(m, "%d\n", READ_ONCE(memcg->oom_group));
4598 
4599 	return 0;
4600 }
4601 
4602 static ssize_t memory_oom_group_write(struct kernfs_open_file *of,
4603 				      char *buf, size_t nbytes, loff_t off)
4604 {
4605 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4606 	int ret, oom_group;
4607 
4608 	buf = strstrip(buf);
4609 	if (!buf)
4610 		return -EINVAL;
4611 
4612 	ret = kstrtoint(buf, 0, &oom_group);
4613 	if (ret)
4614 		return ret;
4615 
4616 	if (oom_group != 0 && oom_group != 1)
4617 		return -EINVAL;
4618 
4619 	WRITE_ONCE(memcg->oom_group, oom_group);
4620 
4621 	return nbytes;
4622 }
4623 
4624 static ssize_t memory_reclaim(struct kernfs_open_file *of, char *buf,
4625 			      size_t nbytes, loff_t off)
4626 {
4627 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4628 	int ret;
4629 
4630 	ret = user_proactive_reclaim(buf, memcg, NULL);
4631 	if (ret)
4632 		return ret;
4633 
4634 	return nbytes;
4635 }
4636 
4637 static struct cftype memory_files[] = {
4638 	{
4639 		.name = "current",
4640 		.flags = CFTYPE_NOT_ON_ROOT,
4641 		.read_u64 = memory_current_read,
4642 	},
4643 	{
4644 		.name = "peak",
4645 		.flags = CFTYPE_NOT_ON_ROOT,
4646 		.open = peak_open,
4647 		.release = peak_release,
4648 		.seq_show = memory_peak_show,
4649 		.write = memory_peak_write,
4650 	},
4651 	{
4652 		.name = "min",
4653 		.flags = CFTYPE_NOT_ON_ROOT,
4654 		.seq_show = memory_min_show,
4655 		.write = memory_min_write,
4656 	},
4657 	{
4658 		.name = "low",
4659 		.flags = CFTYPE_NOT_ON_ROOT,
4660 		.seq_show = memory_low_show,
4661 		.write = memory_low_write,
4662 	},
4663 	{
4664 		.name = "high",
4665 		.flags = CFTYPE_NOT_ON_ROOT,
4666 		.seq_show = memory_high_show,
4667 		.write = memory_high_write,
4668 	},
4669 	{
4670 		.name = "max",
4671 		.flags = CFTYPE_NOT_ON_ROOT,
4672 		.seq_show = memory_max_show,
4673 		.write = memory_max_write,
4674 	},
4675 	{
4676 		.name = "events",
4677 		.flags = CFTYPE_NOT_ON_ROOT,
4678 		.file_offset = offsetof(struct mem_cgroup, events_file),
4679 		.seq_show = memory_events_show,
4680 	},
4681 	{
4682 		.name = "events.local",
4683 		.flags = CFTYPE_NOT_ON_ROOT,
4684 		.file_offset = offsetof(struct mem_cgroup, events_local_file),
4685 		.seq_show = memory_events_local_show,
4686 	},
4687 	{
4688 		.name = "stat",
4689 		.seq_show = memory_stat_show,
4690 	},
4691 #ifdef CONFIG_NUMA
4692 	{
4693 		.name = "numa_stat",
4694 		.seq_show = memory_numa_stat_show,
4695 	},
4696 #endif
4697 	{
4698 		.name = "oom.group",
4699 		.flags = CFTYPE_NOT_ON_ROOT | CFTYPE_NS_DELEGATABLE,
4700 		.seq_show = memory_oom_group_show,
4701 		.write = memory_oom_group_write,
4702 	},
4703 	{
4704 		.name = "reclaim",
4705 		.flags = CFTYPE_NS_DELEGATABLE,
4706 		.write = memory_reclaim,
4707 	},
4708 	{ }	/* terminate */
4709 };
4710 
4711 struct cgroup_subsys memory_cgrp_subsys = {
4712 	.css_alloc = mem_cgroup_css_alloc,
4713 	.css_online = mem_cgroup_css_online,
4714 	.css_offline = mem_cgroup_css_offline,
4715 	.css_released = mem_cgroup_css_released,
4716 	.css_free = mem_cgroup_css_free,
4717 	.css_reset = mem_cgroup_css_reset,
4718 	.css_rstat_flush = mem_cgroup_css_rstat_flush,
4719 	.attach = mem_cgroup_attach,
4720 	.fork = mem_cgroup_fork,
4721 	.exit = mem_cgroup_exit,
4722 	.dfl_cftypes = memory_files,
4723 #ifdef CONFIG_MEMCG_V1
4724 	.legacy_cftypes = mem_cgroup_legacy_files,
4725 #endif
4726 	.early_init = 0,
4727 };
4728 
4729 /**
4730  * mem_cgroup_calculate_protection - check if memory consumption is in the normal range
4731  * @root: the top ancestor of the sub-tree being checked
4732  * @memcg: the memory cgroup to check
4733  *
4734  * WARNING: This function is not stateless! It can only be used as part
4735  *          of a top-down tree iteration, not for isolated queries.
4736  */
4737 void mem_cgroup_calculate_protection(struct mem_cgroup *root,
4738 				     struct mem_cgroup *memcg)
4739 {
4740 	bool recursive_protection =
4741 		cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_RECURSIVE_PROT;
4742 
4743 	if (mem_cgroup_disabled())
4744 		return;
4745 
4746 	if (!root)
4747 		root = root_mem_cgroup;
4748 
4749 	page_counter_calculate_protection(&root->memory, &memcg->memory, recursive_protection);
4750 }
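
/*
 * Illustrative call pattern (a sketch of how reclaim is expected to use
 * this; see the reclaim code in mm/vmscan.c for the real caller):
 * protection is computed while iterating the tree top-down from the
 * reclaim root:
 *
 *	memcg = mem_cgroup_iter(root, NULL, NULL);
 *	do {
 *		mem_cgroup_calculate_protection(root, memcg);
 *		... decide how much of this memcg to scan ...
 *	} while ((memcg = mem_cgroup_iter(root, memcg, NULL)));
 */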
4751 
4752 static int charge_memcg(struct folio *folio, struct mem_cgroup *memcg,
4753 			gfp_t gfp)
4754 {
4755 	int ret;
4756 
4757 	ret = try_charge(memcg, gfp, folio_nr_pages(folio));
4758 	if (ret)
4759 		goto out;
4760 
4761 	css_get(&memcg->css);
4762 	commit_charge(folio, memcg);
4763 	memcg1_commit_charge(folio, memcg);
4764 out:
4765 	return ret;
4766 }
4767 
4768 int __mem_cgroup_charge(struct folio *folio, struct mm_struct *mm, gfp_t gfp)
4769 {
4770 	struct mem_cgroup *memcg;
4771 	int ret;
4772 
4773 	memcg = get_mem_cgroup_from_mm(mm);
4774 	ret = charge_memcg(folio, memcg, gfp);
4775 	css_put(&memcg->css);
4776 
4777 	return ret;
4778 }
4779 
4780 /**
4781  * mem_cgroup_charge_hugetlb - charge the memcg for a hugetlb folio
4782  * @folio: folio being charged
4783  * @gfp: reclaim mode
4784  *
4785  * This function is called when allocating a huge page folio, after the page has
4786  * already been obtained and charged to the appropriate hugetlb cgroup
4787  * controller (if it is enabled).
4788  *
4789  * Returns -ENOMEM if the memcg is already at its limit.
4790  * Returns 0 if the charge was successful or if charging was skipped.
4791  */
4792 int mem_cgroup_charge_hugetlb(struct folio *folio, gfp_t gfp)
4793 {
4794 	struct mem_cgroup *memcg = get_mem_cgroup_from_current();
4795 	int ret = 0;
4796 
4797 	/*
4798 	 * Even if the memcg does not account for hugetlb, we still want to
4799 	 * update system-level stats via lruvec_stat_mod_folio. Return 0 and
4800 	 * skip charging the memcg.
4801 	 */
4802 	if (mem_cgroup_disabled() || !memcg_accounts_hugetlb() ||
4803 		!memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
4804 		goto out;
4805 
4806 	if (charge_memcg(folio, memcg, gfp))
4807 		ret = -ENOMEM;
4808 
4809 out:
4810 	mem_cgroup_put(memcg);
4811 	return ret;
4812 }
4813 
4814 /**
4815  * mem_cgroup_swapin_charge_folio - Charge a newly allocated folio for swapin.
4816  * @folio: folio to charge.
4817  * @mm: mm context of the victim
4818  * @gfp: reclaim mode
4819  * @entry: swap entry for which the folio is allocated
4820  *
4821  * This function charges a folio allocated for swapin. Please call this before
4822  * adding the folio to the swapcache.
4823  *
4824  * Returns 0 on success. Otherwise, an error code is returned.
4825  */
4826 int mem_cgroup_swapin_charge_folio(struct folio *folio, struct mm_struct *mm,
4827 				  gfp_t gfp, swp_entry_t entry)
4828 {
4829 	struct mem_cgroup *memcg;
4830 	unsigned short id;
4831 	int ret;
4832 
4833 	if (mem_cgroup_disabled())
4834 		return 0;
4835 
4836 	id = lookup_swap_cgroup_id(entry);
4837 	rcu_read_lock();
4838 	memcg = mem_cgroup_from_private_id(id);
4839 	if (!memcg || !css_tryget_online(&memcg->css))
4840 		memcg = get_mem_cgroup_from_mm(mm);
4841 	rcu_read_unlock();
4842 
4843 	ret = charge_memcg(folio, memcg, gfp);
4844 
4845 	css_put(&memcg->css);
4846 	return ret;
4847 }
4848 
4849 struct uncharge_gather {
4850 	struct mem_cgroup *memcg;
4851 	unsigned long nr_memory;
4852 	unsigned long pgpgout;
4853 	unsigned long nr_kmem;
4854 	int nid;
4855 };
4856 
4857 static inline void uncharge_gather_clear(struct uncharge_gather *ug)
4858 {
4859 	memset(ug, 0, sizeof(*ug));
4860 }
4861 
4862 static void uncharge_batch(const struct uncharge_gather *ug)
4863 {
4864 	if (ug->nr_memory) {
4865 		memcg_uncharge(ug->memcg, ug->nr_memory);
4866 		if (ug->nr_kmem) {
4867 			mod_memcg_state(ug->memcg, MEMCG_KMEM, -ug->nr_kmem);
4868 			memcg1_account_kmem(ug->memcg, -ug->nr_kmem);
4869 		}
4870 		memcg1_oom_recover(ug->memcg);
4871 	}
4872 
4873 	memcg1_uncharge_batch(ug->memcg, ug->pgpgout, ug->nr_memory, ug->nid);
4874 
4875 	/* drop reference from uncharge_folio */
4876 	css_put(&ug->memcg->css);
4877 }
4878 
4879 static void uncharge_folio(struct folio *folio, struct uncharge_gather *ug)
4880 {
4881 	long nr_pages;
4882 	struct mem_cgroup *memcg;
4883 	struct obj_cgroup *objcg;
4884 
4885 	VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
4886 
4887 	/*
4888 	 * Nobody should be changing or seriously looking at
4889 	 * folio memcg or objcg at this point; we have fully
4890 	 * exclusive access to the folio.
4891 	 */
4892 	if (folio_memcg_kmem(folio)) {
4893 		objcg = __folio_objcg(folio);
4894 		/*
4895 		 * This get matches the put at the end of the function and
4896 		 * kmem pages do not hold memcg references anymore.
4897 		 */
4898 		memcg = get_mem_cgroup_from_objcg(objcg);
4899 	} else {
4900 		memcg = __folio_memcg(folio);
4901 	}
4902 
4903 	if (!memcg)
4904 		return;
4905 
4906 	if (ug->memcg != memcg) {
4907 		if (ug->memcg) {
4908 			uncharge_batch(ug);
4909 			uncharge_gather_clear(ug);
4910 		}
4911 		ug->memcg = memcg;
4912 		ug->nid = folio_nid(folio);
4913 
4914 		/* pairs with css_put in uncharge_batch */
4915 		css_get(&memcg->css);
4916 	}
4917 
4918 	nr_pages = folio_nr_pages(folio);
4919 
4920 	if (folio_memcg_kmem(folio)) {
4921 		ug->nr_memory += nr_pages;
4922 		ug->nr_kmem += nr_pages;
4923 
4924 		folio->memcg_data = 0;
4925 		obj_cgroup_put(objcg);
4926 	} else {
4927 		/* LRU pages aren't accounted at the root level */
4928 		if (!mem_cgroup_is_root(memcg))
4929 			ug->nr_memory += nr_pages;
4930 		ug->pgpgout++;
4931 
4932 		WARN_ON_ONCE(folio_unqueue_deferred_split(folio));
4933 		folio->memcg_data = 0;
4934 	}
4935 
4936 	css_put(&memcg->css);
4937 }
4938 
4939 void __mem_cgroup_uncharge(struct folio *folio)
4940 {
4941 	struct uncharge_gather ug;
4942 
4943 	/* Don't touch folio->lru of any random page, pre-check: */
4944 	if (!folio_memcg_charged(folio))
4945 		return;
4946 
4947 	uncharge_gather_clear(&ug);
4948 	uncharge_folio(folio, &ug);
4949 	uncharge_batch(&ug);
4950 }
4951 
4952 void __mem_cgroup_uncharge_folios(struct folio_batch *folios)
4953 {
4954 	struct uncharge_gather ug;
4955 	unsigned int i;
4956 
4957 	uncharge_gather_clear(&ug);
4958 	for (i = 0; i < folios->nr; i++)
4959 		uncharge_folio(folios->folios[i], &ug);
4960 	if (ug.memcg)
4961 		uncharge_batch(&ug);
4962 }
4963 
4964 /**
4965  * mem_cgroup_replace_folio - Charge a folio's replacement.
4966  * @old: Currently circulating folio.
4967  * @new: Replacement folio.
4968  *
4969  * Charge @new as a replacement folio for @old. @old will
4970  * be uncharged upon free.
4971  *
4972  * Both folios must be locked, @new->mapping must be set up.
4973  */
4974 void mem_cgroup_replace_folio(struct folio *old, struct folio *new)
4975 {
4976 	struct mem_cgroup *memcg;
4977 	long nr_pages = folio_nr_pages(new);
4978 
4979 	VM_BUG_ON_FOLIO(!folio_test_locked(old), old);
4980 	VM_BUG_ON_FOLIO(!folio_test_locked(new), new);
4981 	VM_BUG_ON_FOLIO(folio_test_anon(old) != folio_test_anon(new), new);
4982 	VM_BUG_ON_FOLIO(folio_nr_pages(old) != nr_pages, new);
4983 
4984 	if (mem_cgroup_disabled())
4985 		return;
4986 
4987 	/* Page cache replacement: new folio already charged? */
4988 	if (folio_memcg_charged(new))
4989 		return;
4990 
4991 	memcg = folio_memcg(old);
4992 	VM_WARN_ON_ONCE_FOLIO(!memcg, old);
4993 	if (!memcg)
4994 		return;
4995 
4996 	/* Force-charge the new page. The old one will be freed soon */
4997 	if (!mem_cgroup_is_root(memcg)) {
4998 		page_counter_charge(&memcg->memory, nr_pages);
4999 		if (do_memsw_account())
5000 			page_counter_charge(&memcg->memsw, nr_pages);
5001 	}
5002 
5003 	css_get(&memcg->css);
5004 	commit_charge(new, memcg);
5005 	memcg1_commit_charge(new, memcg);
5006 }
5007 
5008 /**
5009  * mem_cgroup_migrate - Transfer the memcg data from the old to the new folio.
5010  * @old: Currently circulating folio.
5011  * @new: Replacement folio.
5012  *
5013  * Transfer the memcg data from the old folio to the new folio for migration.
5014  * The old folio's data info will be cleared. Note that the memory counters
5015  * will remain unchanged throughout the process.
5016  *
5017  * Both folios must be locked, @new->mapping must be set up.
5018  */
5019 void mem_cgroup_migrate(struct folio *old, struct folio *new)
5020 {
5021 	struct mem_cgroup *memcg;
5022 
5023 	VM_BUG_ON_FOLIO(!folio_test_locked(old), old);
5024 	VM_BUG_ON_FOLIO(!folio_test_locked(new), new);
5025 	VM_BUG_ON_FOLIO(folio_test_anon(old) != folio_test_anon(new), new);
5026 	VM_BUG_ON_FOLIO(folio_nr_pages(old) != folio_nr_pages(new), new);
5027 	VM_BUG_ON_FOLIO(folio_test_lru(old), old);
5028 
5029 	if (mem_cgroup_disabled())
5030 		return;
5031 
5032 	memcg = folio_memcg(old);
5033 	/*
5034 	 * Note that it is normal to see !memcg for a hugetlb folio.
5035 	 * For example, it could have been allocated when memory_hugetlb_accounting
5036 	 * was not selected.
5037 	 */
5038 	VM_WARN_ON_ONCE_FOLIO(!folio_test_hugetlb(old) && !memcg, old);
5039 	if (!memcg)
5040 		return;
5041 
5042 	/* Transfer the charge and the css ref */
5043 	commit_charge(new, memcg);
5044 
5045 	/* The warning should never trigger, so don't worry about a non-zero refcount */
5046 	WARN_ON_ONCE(folio_unqueue_deferred_split(old));
5047 	old->memcg_data = 0;
5048 }
5049 
5050 DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
5051 EXPORT_SYMBOL(memcg_sockets_enabled_key);
5052 
5053 void mem_cgroup_sk_alloc(struct sock *sk)
5054 {
5055 	struct mem_cgroup *memcg;
5056 
5057 	if (!mem_cgroup_sockets_enabled)
5058 		return;
5059 
5060 	/* Do not associate the sock with an unrelated interrupted task's memcg. */
5061 	if (!in_task())
5062 		return;
5063 
5064 	rcu_read_lock();
5065 	memcg = mem_cgroup_from_task(current);
5066 	if (mem_cgroup_is_root(memcg))
5067 		goto out;
5068 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg1_tcpmem_active(memcg))
5069 		goto out;
5070 	if (css_tryget(&memcg->css))
5071 		sk->sk_memcg = memcg;
5072 out:
5073 	rcu_read_unlock();
5074 }
5075 
5076 void mem_cgroup_sk_free(struct sock *sk)
5077 {
5078 	struct mem_cgroup *memcg = mem_cgroup_from_sk(sk);
5079 
5080 	if (memcg)
5081 		css_put(&memcg->css);
5082 }
5083 
5084 void mem_cgroup_sk_inherit(const struct sock *sk, struct sock *newsk)
5085 {
5086 	struct mem_cgroup *memcg;
5087 
5088 	if (sk->sk_memcg == newsk->sk_memcg)
5089 		return;
5090 
5091 	mem_cgroup_sk_free(newsk);
5092 
5093 	memcg = mem_cgroup_from_sk(sk);
5094 	if (memcg)
5095 		css_get(&memcg->css);
5096 
5097 	newsk->sk_memcg = sk->sk_memcg;
5098 }
5099 
5100 /**
5101  * mem_cgroup_sk_charge - charge socket memory
5102  * @sk: socket in memcg to charge
5103  * @nr_pages: number of pages to charge
5104  * @gfp_mask: reclaim mode
5105  *
5106  * Charges @nr_pages to the memcg associated with @sk. Returns %true if
5107  * the charge fit within the memcg's configured limit, %false if it doesn't.
5108  */
5109 bool mem_cgroup_sk_charge(const struct sock *sk, unsigned int nr_pages,
5110 			  gfp_t gfp_mask)
5111 {
5112 	struct mem_cgroup *memcg = mem_cgroup_from_sk(sk);
5113 
5114 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
5115 		return memcg1_charge_skmem(memcg, nr_pages, gfp_mask);
5116 
5117 	if (try_charge_memcg(memcg, gfp_mask, nr_pages) == 0) {
5118 		mod_memcg_state(memcg, MEMCG_SOCK, nr_pages);
5119 		return true;
5120 	}
5121 
5122 	return false;
5123 }
5124 
5125 /**
5126  * mem_cgroup_sk_uncharge - uncharge socket memory
5127  * @sk: socket in memcg to uncharge
5128  * @nr_pages: number of pages to uncharge
5129  */
5130 void mem_cgroup_sk_uncharge(const struct sock *sk, unsigned int nr_pages)
5131 {
5132 	struct mem_cgroup *memcg = mem_cgroup_from_sk(sk);
5133 
5134 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
5135 		memcg1_uncharge_skmem(memcg, nr_pages);
5136 		return;
5137 	}
5138 
5139 	mod_memcg_state(memcg, MEMCG_SOCK, -nr_pages);
5140 
5141 	refill_stock(memcg, nr_pages);
5142 }
5143 
5144 void mem_cgroup_flush_workqueue(void)
5145 {
5146 	flush_workqueue(memcg_wq);
5147 }
5148 
5149 static int __init cgroup_memory(char *s)
5150 {
5151 	char *token;
5152 
5153 	while ((token = strsep(&s, ",")) != NULL) {
5154 		if (!*token)
5155 			continue;
5156 		if (!strcmp(token, "nosocket"))
5157 			cgroup_memory_nosocket = true;
5158 		if (!strcmp(token, "nokmem"))
5159 			cgroup_memory_nokmem = true;
5160 		if (!strcmp(token, "nobpf"))
5161 			cgroup_memory_nobpf = true;
5162 	}
5163 	return 1;
5164 }
5165 __setup("cgroup.memory=", cgroup_memory);
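
/*
 * Example (kernel command line): booting with
 * "cgroup.memory=nosocket,nokmem" sets both of the corresponding flags
 * above; tokens the loop does not recognize are silently ignored.
 */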
5166 
5167 /*
5168  * Memory controller init before cgroup_init() initializes root_mem_cgroup.
5169  *
5170  * Some parts like memcg_hotplug_cpu_dead() have to be initialized from this
5171  * context because of lock dependencies (cgroup_lock -> cpu hotplug) but
5172  * basically everything that doesn't depend on a specific mem_cgroup structure
5173  * should be initialized from here.
5174  */
5175 int __init mem_cgroup_init(void)
5176 {
5177 	unsigned int memcg_size;
5178 	int cpu;
5179 
5180 	/*
5181 	 * Currently an s32 (see struct batched_lruvec_stat) is used for
5182 	 * per-memcg-per-cpu caching of per-node statistics. For this to work
5183 	 * correctly, the overfill threshold must not be able to exceed
5184 	 * S32_MAX / PAGE_SIZE.
5185 	 */
5186 	BUILD_BUG_ON(MEMCG_CHARGE_BATCH > S32_MAX / PAGE_SIZE);
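	/*
	 * Worked numbers (illustrative, assuming 4 KiB pages): S32_MAX /
	 * PAGE_SIZE is about 524287 pages, while MEMCG_CHARGE_BATCH is
	 * currently 64, so the assertion above holds with a wide margin.
	 */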
5187 
5188 	cpuhp_setup_state_nocalls(CPUHP_MM_MEMCQ_DEAD, "mm/memctrl:dead", NULL,
5189 				  memcg_hotplug_cpu_dead);
5190 
5191 	memcg_wq = alloc_workqueue("memcg", WQ_PERCPU, 0);
5192 	WARN_ON(!memcg_wq);
5193 
5194 	for_each_possible_cpu(cpu) {
5195 		INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work,
5196 			  drain_local_memcg_stock);
5197 		INIT_WORK(&per_cpu_ptr(&obj_stock, cpu)->work,
5198 			  drain_local_obj_stock);
5199 	}
5200 
5201 	memcg_size = struct_size_t(struct mem_cgroup, nodeinfo, nr_node_ids);
5202 	memcg_cachep = kmem_cache_create("mem_cgroup", memcg_size, 0,
5203 					 SLAB_PANIC | SLAB_HWCACHE_ALIGN, NULL);
5204 
5205 	memcg_pn_cachep = KMEM_CACHE(mem_cgroup_per_node,
5206 				     SLAB_PANIC | SLAB_HWCACHE_ALIGN);
5207 
5208 	return 0;
5209 }
5210 
5211 #ifdef CONFIG_SWAP
5212 /**
5213  * __mem_cgroup_try_charge_swap - try charging swap space for a folio
5214  * @folio: folio being added to swap
5215  * @entry: swap entry to charge
5216  *
5217  * Try to charge @folio's memcg for the swap space at @entry.
5218  *
5219  * Returns 0 on success, -ENOMEM on failure.
5220  */
5221 int __mem_cgroup_try_charge_swap(struct folio *folio, swp_entry_t entry)
5222 {
5223 	unsigned int nr_pages = folio_nr_pages(folio);
5224 	struct page_counter *counter;
5225 	struct mem_cgroup *memcg;
5226 
5227 	if (do_memsw_account())
5228 		return 0;
5229 
5230 	memcg = folio_memcg(folio);
5231 
5232 	VM_WARN_ON_ONCE_FOLIO(!memcg, folio);
5233 	if (!memcg)
5234 		return 0;
5235 
5236 	if (!entry.val) {
5237 		memcg_memory_event(memcg, MEMCG_SWAP_FAIL);
5238 		return 0;
5239 	}
5240 
5241 	memcg = mem_cgroup_private_id_get_online(memcg, nr_pages);
5242 
5243 	if (!mem_cgroup_is_root(memcg) &&
5244 	    !page_counter_try_charge(&memcg->swap, nr_pages, &counter)) {
5245 		memcg_memory_event(memcg, MEMCG_SWAP_MAX);
5246 		memcg_memory_event(memcg, MEMCG_SWAP_FAIL);
5247 		mem_cgroup_private_id_put(memcg, nr_pages);
5248 		return -ENOMEM;
5249 	}
5250 	mod_memcg_state(memcg, MEMCG_SWAP, nr_pages);
5251 
5252 	swap_cgroup_record(folio, mem_cgroup_private_id(memcg), entry);
5253 
5254 	return 0;
5255 }
5256 
5257 /**
5258  * __mem_cgroup_uncharge_swap - uncharge swap space
5259  * @entry: swap entry to uncharge
5260  * @nr_pages: the amount of swap space to uncharge
5261  */
5262 void __mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages)
5263 {
5264 	struct mem_cgroup *memcg;
5265 	unsigned short id;
5266 
5267 	id = swap_cgroup_clear(entry, nr_pages);
5268 	rcu_read_lock();
5269 	memcg = mem_cgroup_from_private_id(id);
5270 	if (memcg) {
5271 		if (!mem_cgroup_is_root(memcg)) {
5272 			if (do_memsw_account())
5273 				page_counter_uncharge(&memcg->memsw, nr_pages);
5274 			else
5275 				page_counter_uncharge(&memcg->swap, nr_pages);
5276 		}
5277 		mod_memcg_state(memcg, MEMCG_SWAP, -nr_pages);
5278 		mem_cgroup_private_id_put(memcg, nr_pages);
5279 	}
5280 	rcu_read_unlock();
5281 }
5282 
5283 long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
5284 {
5285 	long nr_swap_pages = get_nr_swap_pages();
5286 
5287 	if (mem_cgroup_disabled() || do_memsw_account())
5288 		return nr_swap_pages;
5289 	for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg))
5290 		nr_swap_pages = min_t(long, nr_swap_pages,
5291 				      READ_ONCE(memcg->swap.max) -
5292 				      page_counter_read(&memcg->swap));
5293 	return nr_swap_pages;
5294 }
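
/*
 * Worked example with illustrative numbers: given 10 GiB of free swap
 * globally, a memcg with swap.max = 1 GiB that already uses 256 MiB of
 * swap, under an otherwise unlimited hierarchy, gets
 * min(10 GiB, 1 GiB - 256 MiB) = 768 MiB worth of pages from this
 * helper.
 */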
5295 
5296 bool mem_cgroup_swap_full(struct folio *folio)
5297 {
5298 	struct mem_cgroup *memcg;
5299 
5300 	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
5301 
5302 	if (vm_swap_full())
5303 		return true;
5304 	if (do_memsw_account())
5305 		return false;
5306 
5307 	memcg = folio_memcg(folio);
5308 	if (!memcg)
5309 		return false;
5310 
5311 	for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg)) {
5312 		unsigned long usage = page_counter_read(&memcg->swap);
5313 
5314 		if (usage * 2 >= READ_ONCE(memcg->swap.high) ||
5315 		    usage * 2 >= READ_ONCE(memcg->swap.max))
5316 			return true;
5317 	}
5318 
5319 	return false;
5320 }
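
/*
 * Worked example with illustrative numbers: with swap.max = 1 GiB and
 * swap.high left at "max", the loop above reports the memcg's swap as
 * full once usage reaches 512 MiB, i.e. half of the configured limit,
 * mirroring the global vm_swap_full() heuristic.
 */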
5321 
5322 static int __init setup_swap_account(char *s)
5323 {
5324 	bool res;
5325 
5326 	if (!kstrtobool(s, &res) && !res)
5327 		pr_warn_once("The swapaccount=0 commandline option is deprecated "
5328 			     "in favor of configuring swap control via cgroupfs. "
5329 			     "Please report your usecase to linux-mm@kvack.org if you "
5330 			     "depend on this functionality.\n");
5331 	return 1;
5332 }
5333 __setup("swapaccount=", setup_swap_account);
5334 
5335 static u64 swap_current_read(struct cgroup_subsys_state *css,
5336 			     struct cftype *cft)
5337 {
5338 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5339 
5340 	return (u64)page_counter_read(&memcg->swap) * PAGE_SIZE;
5341 }
5342 
5343 static int swap_peak_show(struct seq_file *sf, void *v)
5344 {
5345 	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(sf));
5346 
5347 	return peak_show(sf, v, &memcg->swap);
5348 }
5349 
5350 static ssize_t swap_peak_write(struct kernfs_open_file *of, char *buf,
5351 			       size_t nbytes, loff_t off)
5352 {
5353 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
5354 
5355 	return peak_write(of, buf, nbytes, off, &memcg->swap,
5356 			  &memcg->swap_peaks);
5357 }
5358 
5359 static int swap_high_show(struct seq_file *m, void *v)
5360 {
5361 	return seq_puts_memcg_tunable(m,
5362 		READ_ONCE(mem_cgroup_from_seq(m)->swap.high));
5363 }
5364 
5365 static ssize_t swap_high_write(struct kernfs_open_file *of,
5366 			       char *buf, size_t nbytes, loff_t off)
5367 {
5368 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
5369 	unsigned long high;
5370 	int err;
5371 
5372 	buf = strstrip(buf);
5373 	err = page_counter_memparse(buf, "max", &high);
5374 	if (err)
5375 		return err;
5376 
5377 	page_counter_set_high(&memcg->swap, high);
5378 
5379 	return nbytes;
5380 }
5381 
5382 static int swap_max_show(struct seq_file *m, void *v)
5383 {
5384 	return seq_puts_memcg_tunable(m,
5385 		READ_ONCE(mem_cgroup_from_seq(m)->swap.max));
5386 }
5387 
5388 static ssize_t swap_max_write(struct kernfs_open_file *of,
5389 			      char *buf, size_t nbytes, loff_t off)
5390 {
5391 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
5392 	unsigned long max;
5393 	int err;
5394 
5395 	buf = strstrip(buf);
5396 	err = page_counter_memparse(buf, "max", &max);
5397 	if (err)
5398 		return err;
5399 
5400 	xchg(&memcg->swap.max, max);
5401 
5402 	return nbytes;
5403 }
5404 
5405 static int swap_events_show(struct seq_file *m, void *v)
5406 {
5407 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
5408 
5409 	seq_printf(m, "high %lu\n",
5410 		   atomic_long_read(&memcg->memory_events[MEMCG_SWAP_HIGH]));
5411 	seq_printf(m, "max %lu\n",
5412 		   atomic_long_read(&memcg->memory_events[MEMCG_SWAP_MAX]));
5413 	seq_printf(m, "fail %lu\n",
5414 		   atomic_long_read(&memcg->memory_events[MEMCG_SWAP_FAIL]));
5415 
5416 	return 0;
5417 }
5418 
5419 static struct cftype swap_files[] = {
5420 	{
5421 		.name = "swap.current",
5422 		.flags = CFTYPE_NOT_ON_ROOT,
5423 		.read_u64 = swap_current_read,
5424 	},
5425 	{
5426 		.name = "swap.high",
5427 		.flags = CFTYPE_NOT_ON_ROOT,
5428 		.seq_show = swap_high_show,
5429 		.write = swap_high_write,
5430 	},
5431 	{
5432 		.name = "swap.max",
5433 		.flags = CFTYPE_NOT_ON_ROOT,
5434 		.seq_show = swap_max_show,
5435 		.write = swap_max_write,
5436 	},
5437 	{
5438 		.name = "swap.peak",
5439 		.flags = CFTYPE_NOT_ON_ROOT,
5440 		.open = peak_open,
5441 		.release = peak_release,
5442 		.seq_show = swap_peak_show,
5443 		.write = swap_peak_write,
5444 	},
5445 	{
5446 		.name = "swap.events",
5447 		.flags = CFTYPE_NOT_ON_ROOT,
5448 		.file_offset = offsetof(struct mem_cgroup, swap_events_file),
5449 		.seq_show = swap_events_show,
5450 	},
5451 	{ }	/* terminate */
5452 };
5453 
5454 #ifdef CONFIG_ZSWAP
5455 /**
5456  * obj_cgroup_may_zswap - check if this cgroup can zswap
5457  * @objcg: the object cgroup
5458  *
5459  * Check if the hierarchical zswap limit has been reached.
5460  *
5461  * This doesn't check for specific headroom, and it is not atomic
5462  * either. But with zswap, the size of the allocation is only known
5463  * once compression has occurred, and this optimistic pre-check avoids
5464  * spending cycles on compression when there is already no room left
5465  * or zswap is disabled altogether somewhere in the hierarchy.
5466  */
5467 bool obj_cgroup_may_zswap(struct obj_cgroup *objcg)
5468 {
5469 	struct mem_cgroup *memcg, *original_memcg;
5470 	bool ret = true;
5471 
5472 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
5473 		return true;
5474 
5475 	original_memcg = get_mem_cgroup_from_objcg(objcg);
5476 	for (memcg = original_memcg; !mem_cgroup_is_root(memcg);
5477 	     memcg = parent_mem_cgroup(memcg)) {
5478 		unsigned long max = READ_ONCE(memcg->zswap_max);
5479 		unsigned long pages;
5480 
5481 		if (max == PAGE_COUNTER_MAX)
5482 			continue;
5483 		if (max == 0) {
5484 			ret = false;
5485 			break;
5486 		}
5487 
5488 		/* Force flush to get accurate stats for charging */
5489 		__mem_cgroup_flush_stats(memcg, true);
5490 		pages = memcg_page_state(memcg, MEMCG_ZSWAP_B) / PAGE_SIZE;
5491 		if (pages < max)
5492 			continue;
5493 		ret = false;
5494 		break;
5495 	}
5496 	mem_cgroup_put(original_memcg);
5497 	return ret;
5498 }
5499 
5500 /**
5501  * obj_cgroup_charge_zswap - charge compression backend memory
5502  * @objcg: the object cgroup
5503  * @size: size of compressed object
5504  *
5505  * This forces the charge after obj_cgroup_may_zswap() allowed
5506  * compression and storage in zswap for this cgroup to go ahead.
5507  */
5508 void obj_cgroup_charge_zswap(struct obj_cgroup *objcg, size_t size)
5509 {
5510 	struct mem_cgroup *memcg;
5511 
5512 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
5513 		return;
5514 
5515 	VM_WARN_ON_ONCE(!(current->flags & PF_MEMALLOC));
5516 
5517 	/* PF_MEMALLOC context, charging must succeed */
5518 	if (obj_cgroup_charge(objcg, GFP_KERNEL, size))
5519 		VM_WARN_ON_ONCE(1);
5520 
5521 	rcu_read_lock();
5522 	memcg = obj_cgroup_memcg(objcg);
5523 	mod_memcg_state(memcg, MEMCG_ZSWAP_B, size);
5524 	mod_memcg_state(memcg, MEMCG_ZSWAPPED, 1);
5525 	if (size == PAGE_SIZE)
5526 		mod_memcg_state(memcg, MEMCG_ZSWAP_INCOMP, 1);
5527 	rcu_read_unlock();
5528 }
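
/*
 * Illustrative sketch of how a store path might pair the optimistic
 * obj_cgroup_may_zswap() pre-check with the forced charge above. The
 * helpers my_compress_folio() and my_store_compressed() are hypothetical
 * placeholders for the real zswap store logic:
 *
 *	size_t clen;
 *
 *	if (!obj_cgroup_may_zswap(objcg))
 *		return false;			// give up before compressing
 *	if (!my_compress_folio(folio, &clen))	// clen: compressed size
 *		return false;
 *	obj_cgroup_charge_zswap(objcg, clen);	// forced; warns if it fails
 *	my_store_compressed(folio, clen);
 *	return true;
 */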
5529 
5530 /**
5531  * obj_cgroup_uncharge_zswap - uncharge compression backend memory
5532  * @objcg: the object cgroup
5533  * @size: size of compressed object
5534  *
5535  * Uncharges zswap memory on page in.
5536  */
5537 void obj_cgroup_uncharge_zswap(struct obj_cgroup *objcg, size_t size)
5538 {
5539 	struct mem_cgroup *memcg;
5540 
5541 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
5542 		return;
5543 
5544 	obj_cgroup_uncharge(objcg, size);
5545 
5546 	rcu_read_lock();
5547 	memcg = obj_cgroup_memcg(objcg);
5548 	mod_memcg_state(memcg, MEMCG_ZSWAP_B, -size);
5549 	mod_memcg_state(memcg, MEMCG_ZSWAPPED, -1);
5550 	if (size == PAGE_SIZE)
5551 		mod_memcg_state(memcg, MEMCG_ZSWAP_INCOMP, -1);
5552 	rcu_read_unlock();
5553 }
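
/*
 * The uncharge above is the counterpart of obj_cgroup_charge_zswap(): it
 * is expected to be called with the same @size when the compressed object
 * is freed (for example after the page has been loaded back in or written
 * out to the backing swap device), keeping MEMCG_ZSWAP_B and
 * MEMCG_ZSWAPPED balanced.
 */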
5554 
5555 bool mem_cgroup_zswap_writeback_enabled(struct mem_cgroup *memcg)
5556 {
5557 	/* if zswap is disabled, do not block pages going to the swapping device */
5558 	if (!zswap_is_enabled())
5559 		return true;
5560 
5561 	for (; memcg; memcg = parent_mem_cgroup(memcg))
5562 		if (!READ_ONCE(memcg->zswap_writeback))
5563 			return false;
5564 
5565 	return true;
5566 }
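
/*
 * Writeback to the backing swap device is therefore only permitted when
 * zswap_writeback is enabled in the cgroup itself and in every ancestor;
 * a single 0 anywhere up the chain pins that subtree's pages in zswap.
 * When zswap is disabled globally, writeback is always allowed so pages
 * can still reach the swap device.
 */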
5567 
5568 static u64 zswap_current_read(struct cgroup_subsys_state *css,
5569 			      struct cftype *cft)
5570 {
5571 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5572 
5573 	mem_cgroup_flush_stats(memcg);
5574 	return memcg_page_state(memcg, MEMCG_ZSWAP_B);
5575 }
5576 
5577 static int zswap_max_show(struct seq_file *m, void *v)
5578 {
5579 	return seq_puts_memcg_tunable(m,
5580 		READ_ONCE(mem_cgroup_from_seq(m)->zswap_max));
5581 }
5582 
5583 static ssize_t zswap_max_write(struct kernfs_open_file *of,
5584 			       char *buf, size_t nbytes, loff_t off)
5585 {
5586 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
5587 	unsigned long max;
5588 	int err;
5589 
5590 	buf = strstrip(buf);
5591 	err = page_counter_memparse(buf, "max", &max);
5592 	if (err)
5593 		return err;
5594 
5595 	xchg(&memcg->zswap_max, max);
5596 
5597 	return nbytes;
5598 }
5599 
5600 static int zswap_writeback_show(struct seq_file *m, void *v)
5601 {
5602 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
5603 
5604 	seq_printf(m, "%d\n", READ_ONCE(memcg->zswap_writeback));
5605 	return 0;
5606 }
5607 
5608 static ssize_t zswap_writeback_write(struct kernfs_open_file *of,
5609 				char *buf, size_t nbytes, loff_t off)
5610 {
5611 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
5612 	int zswap_writeback;
5613 	ssize_t parse_ret = kstrtoint(strstrip(buf), 0, &zswap_writeback);
5614 
5615 	if (parse_ret)
5616 		return parse_ret;
5617 
5618 	if (zswap_writeback != 0 && zswap_writeback != 1)
5619 		return -EINVAL;
5620 
5621 	WRITE_ONCE(memcg->zswap_writeback, zswap_writeback);
5622 	return nbytes;
5623 }
5624 
5625 static struct cftype zswap_files[] = {
5626 	{
5627 		.name = "zswap.current",
5628 		.flags = CFTYPE_NOT_ON_ROOT,
5629 		.read_u64 = zswap_current_read,
5630 	},
5631 	{
5632 		.name = "zswap.max",
5633 		.flags = CFTYPE_NOT_ON_ROOT,
5634 		.seq_show = zswap_max_show,
5635 		.write = zswap_max_write,
5636 	},
5637 	{
5638 		.name = "zswap.writeback",
5639 		.seq_show = zswap_writeback_show,
5640 		.write = zswap_writeback_write,
5641 	},
5642 	{ }	/* terminate */
5643 };
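
/*
 * Unlike the other entries, zswap.writeback is not marked
 * CFTYPE_NOT_ON_ROOT, so memory.zswap.writeback is also visible on the
 * root cgroup while the current and max files are not.
 */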
5644 #endif /* CONFIG_ZSWAP */
5645 
5646 static int __init mem_cgroup_swap_init(void)
5647 {
5648 	if (mem_cgroup_disabled())
5649 		return 0;
5650 
5651 	WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys, swap_files));
5652 #ifdef CONFIG_MEMCG_V1
5653 	WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys, memsw_files));
5654 #endif
5655 #ifdef CONFIG_ZSWAP
5656 	WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys, zswap_files));
5657 #endif
5658 	return 0;
5659 }
5660 subsys_initcall(mem_cgroup_swap_init);
5661 
5662 #endif /* CONFIG_SWAP */
5663 
5664 void mem_cgroup_node_filter_allowed(struct mem_cgroup *memcg, nodemask_t *mask)
5665 {
5666 	nodemask_t allowed;
5667 
5668 	if (!memcg)
5669 		return;
5670 
5671 	/*
5672 	 * Since this interface is intended for use by migration paths, and
5673 	 * reclaim and migration are subject to race conditions such as changes
5674 	 * in effective_mems and hot-unplugging of nodes, an inaccurate
5675 	 * allowed mask is acceptable.
5676 	 */
5677 	cpuset_nodes_allowed(memcg->css.cgroup, &allowed);
5678 	nodes_and(*mask, *mask, allowed);
5679 }
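
/*
 * Illustrative use (hypothetical caller): a migration or demotion path
 * could start from a full target nodemask and intersect it with the
 * cgroup's cpuset-allowed nodes before picking a destination. The helper
 * my_pick_demotion_node() is a placeholder for this example:
 *
 *	nodemask_t targets = NODE_MASK_ALL;
 *	int nid;
 *
 *	mem_cgroup_node_filter_allowed(memcg, &targets);
 *	if (nodes_empty(targets))
 *		return -ENOMEM;		// no allowed destination left
 *	nid = my_pick_demotion_node(&targets);
 */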
5680 
5681 void mem_cgroup_show_protected_memory(struct mem_cgroup *memcg)
5682 {
5683 	if (mem_cgroup_disabled() || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
5684 		return;
5685 
5686 	if (!memcg)
5687 		memcg = root_mem_cgroup;
5688 
5689 	pr_warn("Memory cgroup min protection %lukB -- low protection %lukB\n",
5690 		K(atomic_long_read(&memcg->memory.children_min_usage)),
5691 		K(atomic_long_read(&memcg->memory.children_low_usage)));
5692 }
5693