// SPDX-License-Identifier: GPL-2.0-or-later
/* memcontrol.c - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * Memory thresholds
 * Copyright (C) 2009 Nokia Corporation
 * Author: Kirill A. Shutemov
 *
 * Kernel Memory Controller
 * Copyright (C) 2012 Parallels Inc. and Google Inc.
 * Authors: Glauber Costa and Suleiman Souhlal
 *
 * Native page reclaim
 * Charge lifetime sanitation
 * Lockless page tracking & accounting
 * Unified hierarchy configuration model
 * Copyright (C) 2015 Red Hat, Inc., Johannes Weiner
 *
 * Per memcg lru locking
 * Copyright (C) 2020 Alibaba, Inc, Alex Shi
 */

#include <linux/cgroup-defs.h>
#include <linux/page_counter.h>
#include <linux/memcontrol.h>
#include <linux/cgroup.h>
#include <linux/sched/mm.h>
#include <linux/shmem_fs.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/vm_event_item.h>
#include <linux/smp.h>
#include <linux/page-flags.h>
#include <linux/backing-dev.h>
#include <linux/bit_spinlock.h>
#include <linux/rcupdate.h>
#include <linux/limits.h>
#include <linux/export.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/swapops.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/parser.h>
#include <linux/vmpressure.h>
#include <linux/memremap.h>
#include <linux/mm_inline.h>
#include <linux/swap_cgroup.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/lockdep.h>
#include <linux/resume_user_mode.h>
#include <linux/psi.h>
#include <linux/seq_buf.h>
#include <linux/sched/isolation.h>
#include <linux/kmemleak.h>
#include "internal.h"
#include <net/sock.h>
#include <net/ip.h>
#include "slab.h"
#include "memcontrol-v1.h"

#include <linux/uaccess.h>

#define CREATE_TRACE_POINTS
#include <trace/events/memcg.h>
#undef CREATE_TRACE_POINTS

#include <trace/events/vmscan.h>

struct cgroup_subsys memory_cgrp_subsys __read_mostly;
EXPORT_SYMBOL(memory_cgrp_subsys);

struct mem_cgroup *root_mem_cgroup __read_mostly;

/* Active memory cgroup to use from an interrupt context */
DEFINE_PER_CPU(struct mem_cgroup *, int_active_memcg);
EXPORT_PER_CPU_SYMBOL_GPL(int_active_memcg);

/* Socket memory accounting disabled? */
static bool cgroup_memory_nosocket __ro_after_init;

/* Kernel memory accounting disabled? */
static bool cgroup_memory_nokmem __ro_after_init;

/* BPF memory accounting disabled? */
static bool cgroup_memory_nobpf __ro_after_init;

#ifdef CONFIG_CGROUP_WRITEBACK
static DECLARE_WAIT_QUEUE_HEAD(memcg_cgwb_frn_waitq);
#endif

static inline bool task_is_dying(void)
{
	return tsk_is_oom_victim(current) || fatal_signal_pending(current) ||
		(current->flags & PF_EXITING);
}

/* Some nice accessors for the vmpressure. */
struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
{
	if (!memcg)
		memcg = root_mem_cgroup;
	return &memcg->vmpressure;
}

struct mem_cgroup *vmpressure_to_memcg(struct vmpressure *vmpr)
{
	return container_of(vmpr, struct mem_cgroup, vmpressure);
}

#define SEQ_BUF_SIZE SZ_4K
#define CURRENT_OBJCG_UPDATE_BIT 0
#define CURRENT_OBJCG_UPDATE_FLAG (1UL << CURRENT_OBJCG_UPDATE_BIT)

static DEFINE_SPINLOCK(objcg_lock);

bool mem_cgroup_kmem_disabled(void)
{
	return cgroup_memory_nokmem;
}

static void obj_cgroup_uncharge_pages(struct obj_cgroup *objcg,
				      unsigned int nr_pages);

static void obj_cgroup_release(struct percpu_ref *ref)
{
	struct obj_cgroup *objcg = container_of(ref, struct obj_cgroup, refcnt);
	unsigned int nr_bytes;
	unsigned int nr_pages;
	unsigned long flags;

	/*
	 * At this point all allocated objects are freed, and
	 * objcg->nr_charged_bytes can't have an arbitrary byte value.
	 * However, it can be PAGE_SIZE or (x * PAGE_SIZE).
	 *
	 * The following sequence can lead to it:
	 * 1) CPU0: objcg == stock->cached_objcg
	 * 2) CPU1: we do a small allocation (e.g. 92 bytes),
	 *          PAGE_SIZE bytes are charged
	 * 3) CPU1: a process from another memcg is allocating something,
	 *          the stock is flushed,
	 *          objcg->nr_charged_bytes = PAGE_SIZE - 92
	 * 4) CPU0: we release this object,
	 *          92 bytes are added to stock->nr_bytes
	 * 5) CPU0: stock is flushed,
	 *          92 bytes are added to objcg->nr_charged_bytes
	 *
	 * As a result, nr_charged_bytes == PAGE_SIZE.
	 * This page will be uncharged in obj_cgroup_release().
	 */
	nr_bytes = atomic_read(&objcg->nr_charged_bytes);
	WARN_ON_ONCE(nr_bytes & (PAGE_SIZE - 1));
	nr_pages = nr_bytes >> PAGE_SHIFT;

	if (nr_pages)
		obj_cgroup_uncharge_pages(objcg, nr_pages);

	spin_lock_irqsave(&objcg_lock, flags);
	list_del(&objcg->list);
	spin_unlock_irqrestore(&objcg_lock, flags);

	percpu_ref_exit(ref);
	kfree_rcu(objcg, rcu);
}

static struct obj_cgroup *obj_cgroup_alloc(void)
{
	struct obj_cgroup *objcg;
	int ret;

	objcg = kzalloc(sizeof(struct obj_cgroup), GFP_KERNEL);
	if (!objcg)
		return NULL;

	ret = percpu_ref_init(&objcg->refcnt, obj_cgroup_release, 0,
			      GFP_KERNEL);
	if (ret) {
		kfree(objcg);
		return NULL;
	}
	INIT_LIST_HEAD(&objcg->list);
	return objcg;
}
195 
memcg_reparent_objcgs(struct mem_cgroup * memcg,struct mem_cgroup * parent)196 static void memcg_reparent_objcgs(struct mem_cgroup *memcg,
197 				  struct mem_cgroup *parent)
198 {
199 	struct obj_cgroup *objcg, *iter;
200 
201 	objcg = rcu_replace_pointer(memcg->objcg, NULL, true);
202 
203 	spin_lock_irq(&objcg_lock);
204 
205 	/* 1) Ready to reparent active objcg. */
206 	list_add(&objcg->list, &memcg->objcg_list);
207 	/* 2) Reparent active objcg and already reparented objcgs to parent. */
208 	list_for_each_entry(iter, &memcg->objcg_list, list)
209 		WRITE_ONCE(iter->memcg, parent);
210 	/* 3) Move already reparented objcgs to the parent's list */
211 	list_splice(&memcg->objcg_list, &parent->objcg_list);
212 
213 	spin_unlock_irq(&objcg_lock);
214 
215 	percpu_ref_kill(&objcg->refcnt);
216 }

/*
 * A lot of the calls to the cache allocation functions are expected to be
 * inlined by the compiler. Since the calls to memcg_slab_post_alloc_hook() are
 * conditional to this static branch, we have to allow modules that do
 * kmem_cache_alloc and the like to see this symbol as well.
 */
DEFINE_STATIC_KEY_FALSE(memcg_kmem_online_key);
EXPORT_SYMBOL(memcg_kmem_online_key);

DEFINE_STATIC_KEY_FALSE(memcg_bpf_enabled_key);
EXPORT_SYMBOL(memcg_bpf_enabled_key);

/**
 * mem_cgroup_css_from_folio - css of the memcg associated with a folio
 * @folio: folio of interest
 *
 * If memcg is bound to the default hierarchy, css of the memcg associated
 * with @folio is returned.  The returned css remains associated with @folio
 * until it is released.
 *
 * If memcg is bound to a traditional hierarchy, the css of root_mem_cgroup
 * is returned.
 */
struct cgroup_subsys_state *mem_cgroup_css_from_folio(struct folio *folio)
{
	struct mem_cgroup *memcg = folio_memcg(folio);

	if (!memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
		memcg = root_mem_cgroup;

	return &memcg->css;
}

/**
 * page_cgroup_ino - return inode number of the memcg a page is charged to
 * @page: the page
 *
 * Look up the closest online ancestor of the memory cgroup @page is charged to
 * and return its inode number or 0 if @page is not charged to any cgroup. It
 * is safe to call this function without holding a reference to @page.
 *
 * Note, this function is inherently racy, because there is nothing to prevent
 * the cgroup inode from getting torn down and potentially reallocated a moment
 * after page_cgroup_ino() returns, so it should only be used by callers that
 * do not care (such as procfs interfaces).
 */
ino_t page_cgroup_ino(struct page *page)
{
	struct mem_cgroup *memcg;
	unsigned long ino = 0;

	rcu_read_lock();
	/* page_folio() is racy here, but the entire function is racy anyway */
	memcg = folio_memcg_check(page_folio(page));

	while (memcg && !(memcg->css.flags & CSS_ONLINE))
		memcg = parent_mem_cgroup(memcg);
	if (memcg)
		ino = cgroup_ino(memcg->css.cgroup);
	rcu_read_unlock();
	return ino;
}

/* Subset of node_stat_item for memcg stats */
static const unsigned int memcg_node_stat_items[] = {
	NR_INACTIVE_ANON,
	NR_ACTIVE_ANON,
	NR_INACTIVE_FILE,
	NR_ACTIVE_FILE,
	NR_UNEVICTABLE,
	NR_SLAB_RECLAIMABLE_B,
	NR_SLAB_UNRECLAIMABLE_B,
	WORKINGSET_REFAULT_ANON,
	WORKINGSET_REFAULT_FILE,
	WORKINGSET_ACTIVATE_ANON,
	WORKINGSET_ACTIVATE_FILE,
	WORKINGSET_RESTORE_ANON,
	WORKINGSET_RESTORE_FILE,
	WORKINGSET_NODERECLAIM,
	NR_ANON_MAPPED,
	NR_FILE_MAPPED,
	NR_FILE_PAGES,
	NR_FILE_DIRTY,
	NR_WRITEBACK,
	NR_SHMEM,
	NR_SHMEM_THPS,
	NR_FILE_THPS,
	NR_ANON_THPS,
	NR_KERNEL_STACK_KB,
	NR_PAGETABLE,
	NR_SECONDARY_PAGETABLE,
#ifdef CONFIG_SWAP
	NR_SWAPCACHE,
#endif
#ifdef CONFIG_NUMA_BALANCING
	PGPROMOTE_SUCCESS,
#endif
	PGDEMOTE_KSWAPD,
	PGDEMOTE_DIRECT,
	PGDEMOTE_KHUGEPAGED,
#ifdef CONFIG_HUGETLB_PAGE
	NR_HUGETLB,
#endif
};

static const unsigned int memcg_stat_items[] = {
	MEMCG_SWAP,
	MEMCG_SOCK,
	MEMCG_PERCPU_B,
	MEMCG_VMALLOC,
	MEMCG_KMEM,
	MEMCG_ZSWAP_B,
	MEMCG_ZSWAPPED,
};

#define NR_MEMCG_NODE_STAT_ITEMS ARRAY_SIZE(memcg_node_stat_items)
#define MEMCG_VMSTAT_SIZE (NR_MEMCG_NODE_STAT_ITEMS + \
			   ARRAY_SIZE(memcg_stat_items))
#define BAD_STAT_IDX(index) ((u32)(index) >= U8_MAX)
static u8 mem_cgroup_stats_index[MEMCG_NR_STAT] __read_mostly;

static void init_memcg_stats(void)
{
	u8 i, j = 0;

	BUILD_BUG_ON(MEMCG_NR_STAT >= U8_MAX);

	memset(mem_cgroup_stats_index, U8_MAX, sizeof(mem_cgroup_stats_index));

	for (i = 0; i < NR_MEMCG_NODE_STAT_ITEMS; ++i, ++j)
		mem_cgroup_stats_index[memcg_node_stat_items[i]] = j;

	for (i = 0; i < ARRAY_SIZE(memcg_stat_items); ++i, ++j)
		mem_cgroup_stats_index[memcg_stat_items[i]] = j;
}

static inline int memcg_stats_index(int idx)
{
	return mem_cgroup_stats_index[idx];
}

struct lruvec_stats_percpu {
	/* Local (CPU and cgroup) state */
	long state[NR_MEMCG_NODE_STAT_ITEMS];

	/* Delta calculation for lockless upward propagation */
	long state_prev[NR_MEMCG_NODE_STAT_ITEMS];
};

struct lruvec_stats {
	/* Aggregated (CPU and subtree) state */
	long state[NR_MEMCG_NODE_STAT_ITEMS];

	/* Non-hierarchical (CPU aggregated) state */
	long state_local[NR_MEMCG_NODE_STAT_ITEMS];

	/* Pending child counts during tree propagation */
	long state_pending[NR_MEMCG_NODE_STAT_ITEMS];
};

unsigned long lruvec_page_state(struct lruvec *lruvec, enum node_stat_item idx)
{
	struct mem_cgroup_per_node *pn;
	long x;
	int i;

	if (mem_cgroup_disabled())
		return node_page_state(lruvec_pgdat(lruvec), idx);

	i = memcg_stats_index(idx);
	if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx))
		return 0;

	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	x = READ_ONCE(pn->lruvec_stats->state[i]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

unsigned long lruvec_page_state_local(struct lruvec *lruvec,
				      enum node_stat_item idx)
{
	struct mem_cgroup_per_node *pn;
	long x;
	int i;

	if (mem_cgroup_disabled())
		return node_page_state(lruvec_pgdat(lruvec), idx);

	i = memcg_stats_index(idx);
	if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx))
		return 0;

	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	x = READ_ONCE(pn->lruvec_stats->state_local[i]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

/* Subset of vm_event_item to report for memcg event stats */
static const unsigned int memcg_vm_event_stat[] = {
#ifdef CONFIG_MEMCG_V1
	PGPGIN,
	PGPGOUT,
#endif
	PSWPIN,
	PSWPOUT,
	PGSCAN_KSWAPD,
	PGSCAN_DIRECT,
	PGSCAN_KHUGEPAGED,
	PGSTEAL_KSWAPD,
	PGSTEAL_DIRECT,
	PGSTEAL_KHUGEPAGED,
	PGFAULT,
	PGMAJFAULT,
	PGREFILL,
	PGACTIVATE,
	PGDEACTIVATE,
	PGLAZYFREE,
	PGLAZYFREED,
#ifdef CONFIG_SWAP
	SWPIN_ZERO,
	SWPOUT_ZERO,
#endif
#ifdef CONFIG_ZSWAP
	ZSWPIN,
	ZSWPOUT,
	ZSWPWB,
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	THP_FAULT_ALLOC,
	THP_COLLAPSE_ALLOC,
	THP_SWPOUT,
	THP_SWPOUT_FALLBACK,
#endif
#ifdef CONFIG_NUMA_BALANCING
	NUMA_PAGE_MIGRATE,
	NUMA_PTE_UPDATES,
	NUMA_HINT_FAULTS,
#endif
};

#define NR_MEMCG_EVENTS ARRAY_SIZE(memcg_vm_event_stat)
static u8 mem_cgroup_events_index[NR_VM_EVENT_ITEMS] __read_mostly;

static void init_memcg_events(void)
{
	u8 i;

	BUILD_BUG_ON(NR_VM_EVENT_ITEMS >= U8_MAX);

	memset(mem_cgroup_events_index, U8_MAX,
	       sizeof(mem_cgroup_events_index));

	for (i = 0; i < NR_MEMCG_EVENTS; ++i)
		mem_cgroup_events_index[memcg_vm_event_stat[i]] = i;
}

static inline int memcg_events_index(enum vm_event_item idx)
{
	return mem_cgroup_events_index[idx];
}

struct memcg_vmstats_percpu {
	/* Stats updates since the last flush */
	unsigned int			stats_updates;

	/* Cached pointers for fast iteration in memcg_rstat_updated() */
	struct memcg_vmstats_percpu	*parent;
	struct memcg_vmstats		*vmstats;

	/* The above should fit a single cacheline for memcg_rstat_updated() */

	/* Local (CPU and cgroup) page state & events */
	long			state[MEMCG_VMSTAT_SIZE];
	unsigned long		events[NR_MEMCG_EVENTS];

	/* Delta calculation for lockless upward propagation */
	long			state_prev[MEMCG_VMSTAT_SIZE];
	unsigned long		events_prev[NR_MEMCG_EVENTS];
} ____cacheline_aligned;

struct memcg_vmstats {
	/* Aggregated (CPU and subtree) page state & events */
	long			state[MEMCG_VMSTAT_SIZE];
	unsigned long		events[NR_MEMCG_EVENTS];

	/* Non-hierarchical (CPU aggregated) page state & events */
	long			state_local[MEMCG_VMSTAT_SIZE];
	unsigned long		events_local[NR_MEMCG_EVENTS];

	/* Pending child counts during tree propagation */
	long			state_pending[MEMCG_VMSTAT_SIZE];
	unsigned long		events_pending[NR_MEMCG_EVENTS];

	/* Stats updates since the last flush */
	atomic64_t		stats_updates;
};

/*
 * memcg and lruvec stats flushing
 *
 * Many codepaths leading to stats update or read are performance sensitive and
 * adding stats flushing in such codepaths is not desirable. So, to optimize the
 * flushing, the kernel does:
 *
 * 1) Periodically and asynchronously flush the stats every 2 seconds to not let
 *    the rstat update tree grow unbounded.
 *
 * 2) Flush the stats synchronously on the reader side only when there are more
 *    than (MEMCG_CHARGE_BATCH * nr_cpus) update events. This optimization lets
 *    the stats be out of sync by at most (MEMCG_CHARGE_BATCH * nr_cpus) events,
 *    but only for 2 seconds due to (1).
 */
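
/*
 * A minimal sketch of the reader-side contract implied above (illustrative,
 * not a verbatim kernel excerpt): a statistics consumer flushes first, then
 * reads; the flush is skipped internally when pending updates are below the
 * threshold:
 *
 *	mem_cgroup_flush_stats(memcg);
 *	nr_dirty = memcg_page_state(memcg, NR_FILE_DIRTY);
 *
 * This is the pattern followed by memcg_stat_format() further down.
 */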
static void flush_memcg_stats_dwork(struct work_struct *w);
static DECLARE_DEFERRABLE_WORK(stats_flush_dwork, flush_memcg_stats_dwork);
static u64 flush_last_time;

#define FLUSH_TIME (2UL*HZ)

/*
 * Accessors to ensure that preemption is disabled on PREEMPT_RT because it
 * cannot rely on this as part of an acquired spinlock_t lock. These functions
 * are never used in hardirq context on PREEMPT_RT and therefore disabling
 * preemption is sufficient.
 */
static void memcg_stats_lock(void)
{
	preempt_disable_nested();
	VM_WARN_ON_IRQS_ENABLED();
}

static void __memcg_stats_lock(void)
{
	preempt_disable_nested();
}

static void memcg_stats_unlock(void)
{
	preempt_enable_nested();
}

static bool memcg_vmstats_needs_flush(struct memcg_vmstats *vmstats)
{
	return atomic64_read(&vmstats->stats_updates) >
		MEMCG_CHARGE_BATCH * num_online_cpus();
}

static inline void memcg_rstat_updated(struct mem_cgroup *memcg, int val)
{
	struct memcg_vmstats_percpu *statc;
	int cpu = smp_processor_id();
	unsigned int stats_updates;

	if (!val)
		return;

	cgroup_rstat_updated(memcg->css.cgroup, cpu);
	statc = this_cpu_ptr(memcg->vmstats_percpu);
	for (; statc; statc = statc->parent) {
		stats_updates = READ_ONCE(statc->stats_updates) + abs(val);
		WRITE_ONCE(statc->stats_updates, stats_updates);
		if (stats_updates < MEMCG_CHARGE_BATCH)
			continue;

		/*
		 * If @memcg is already flush-able, increasing stats_updates is
		 * redundant. Avoid the overhead of the atomic update.
		 */
		if (!memcg_vmstats_needs_flush(statc->vmstats))
			atomic64_add(stats_updates,
				     &statc->vmstats->stats_updates);
		WRITE_ONCE(statc->stats_updates, 0);
	}
}
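
/*
 * For illustration: a single update in a leaf cgroup walks the cached
 * statc->parent chain, so each ancestor's per-cpu stats_updates accumulates
 * the magnitude of the change. Once a level crosses MEMCG_CHARGE_BATCH,
 * the batch is folded into the shared atomic that
 * memcg_vmstats_needs_flush() checks and the per-cpu counter restarts from
 * zero. Negative deltas count via abs(), since staleness grows with the
 * magnitude of updates, not their sign.
 */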

static void __mem_cgroup_flush_stats(struct mem_cgroup *memcg, bool force)
{
	bool needs_flush = memcg_vmstats_needs_flush(memcg->vmstats);

	trace_memcg_flush_stats(memcg, atomic64_read(&memcg->vmstats->stats_updates),
		force, needs_flush);

	if (!force && !needs_flush)
		return;

	if (mem_cgroup_is_root(memcg))
		WRITE_ONCE(flush_last_time, jiffies_64);

	cgroup_rstat_flush(memcg->css.cgroup);
}

/*
 * mem_cgroup_flush_stats - flush the stats of a memory cgroup subtree
 * @memcg: root of the subtree to flush
 *
 * Flushing is serialized by the underlying global rstat lock. There is also a
 * minimum amount of work to be done even if there are no stat updates to flush.
 * Hence, we only flush the stats if the updates delta exceeds a threshold. This
 * avoids unnecessary work and contention on the underlying lock.
 */
void mem_cgroup_flush_stats(struct mem_cgroup *memcg)
{
	if (mem_cgroup_disabled())
		return;

	if (!memcg)
		memcg = root_mem_cgroup;

	__mem_cgroup_flush_stats(memcg, false);
}

void mem_cgroup_flush_stats_ratelimited(struct mem_cgroup *memcg)
{
	/* Only flush if the periodic flusher is one full cycle late */
	if (time_after64(jiffies_64, READ_ONCE(flush_last_time) + 2*FLUSH_TIME))
		mem_cgroup_flush_stats(memcg);
}

static void flush_memcg_stats_dwork(struct work_struct *w)
{
	/*
	 * Deliberately ignore memcg_vmstats_needs_flush() here so that flushing
	 * in latency-sensitive paths is as cheap as possible.
	 */
	__mem_cgroup_flush_stats(root_mem_cgroup, true);
	queue_delayed_work(system_unbound_wq, &stats_flush_dwork, FLUSH_TIME);
}

unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
{
	long x;
	int i = memcg_stats_index(idx);

	if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx))
		return 0;

	x = READ_ONCE(memcg->vmstats->state[i]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static int memcg_page_state_unit(int item);

/*
 * Normalize the value passed into memcg_rstat_updated() to be in pages. Round
 * up non-zero sub-page updates to 1 page as zero page updates are ignored.
 */
static int memcg_state_val_in_pages(int idx, int val)
{
	int unit = memcg_page_state_unit(idx);

	if (!val || unit == PAGE_SIZE)
		return val;
	else
		return max(val * unit / PAGE_SIZE, 1UL);
}
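
/*
 * Worked example (illustrative, assuming a 4K PAGE_SIZE): a 512-byte slab
 * charge (unit == 1) yields 512 / 4096 == 0 pages, which is rounded up to 1
 * so the update still counts toward the flush threshold; a 3-page delta of
 * a PAGE_SIZE item passes through unchanged.
 */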

/**
 * __mod_memcg_state - update cgroup memory statistics
 * @memcg: the memory cgroup
 * @idx: the stat item - can be enum memcg_stat_item or enum node_stat_item
 * @val: delta to add to the counter, can be negative
 */
void __mod_memcg_state(struct mem_cgroup *memcg, enum memcg_stat_item idx,
		       int val)
{
	int i = memcg_stats_index(idx);

	if (mem_cgroup_disabled())
		return;

	if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx))
		return;

	__this_cpu_add(memcg->vmstats_percpu->state[i], val);
	val = memcg_state_val_in_pages(idx, val);
	memcg_rstat_updated(memcg, val);
	trace_mod_memcg_state(memcg, idx, val);
}

/* idx can be of type enum memcg_stat_item or node_stat_item. */
unsigned long memcg_page_state_local(struct mem_cgroup *memcg, int idx)
{
	long x;
	int i = memcg_stats_index(idx);

	if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx))
		return 0;

	x = READ_ONCE(memcg->vmstats->state_local[i]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static void __mod_memcg_lruvec_state(struct lruvec *lruvec,
				     enum node_stat_item idx,
				     int val)
{
	struct mem_cgroup_per_node *pn;
	struct mem_cgroup *memcg;
	int i = memcg_stats_index(idx);

	if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx))
		return;

	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	memcg = pn->memcg;

	/*
	 * Callers from rmap rely on disabled preemption because they never
	 * update their counter from in-interrupt context. For those counters
	 * we check that the update is never performed from an interrupt
	 * context, while other callers need to have interrupts disabled.
	 */
	__memcg_stats_lock();
	if (IS_ENABLED(CONFIG_DEBUG_VM)) {
		switch (idx) {
		case NR_ANON_MAPPED:
		case NR_FILE_MAPPED:
		case NR_ANON_THPS:
			WARN_ON_ONCE(!in_task());
			break;
		default:
			VM_WARN_ON_IRQS_ENABLED();
		}
	}

	/* Update memcg */
	__this_cpu_add(memcg->vmstats_percpu->state[i], val);

	/* Update lruvec */
	__this_cpu_add(pn->lruvec_stats_percpu->state[i], val);

	val = memcg_state_val_in_pages(idx, val);
	memcg_rstat_updated(memcg, val);
	trace_mod_memcg_lruvec_state(memcg, idx, val);
	memcg_stats_unlock();
}

/**
 * __mod_lruvec_state - update lruvec memory statistics
 * @lruvec: the lruvec
 * @idx: the stat item
 * @val: delta to add to the counter, can be negative
 *
 * The lruvec is the intersection of the NUMA node and a cgroup. This
 * function updates all three counters that are affected by a change of
 * state at this level: per-node, per-cgroup, per-lruvec.
 */
void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
			int val)
{
	/* Update node */
	__mod_node_page_state(lruvec_pgdat(lruvec), idx, val);

	/* Update memcg and lruvec */
	if (!mem_cgroup_disabled())
		__mod_memcg_lruvec_state(lruvec, idx, val);
}

void __lruvec_stat_mod_folio(struct folio *folio, enum node_stat_item idx,
			     int val)
{
	struct mem_cgroup *memcg;
	pg_data_t *pgdat = folio_pgdat(folio);
	struct lruvec *lruvec;

	rcu_read_lock();
	memcg = folio_memcg(folio);
	/* Untracked pages have no memcg, no lruvec. Update only the node */
	if (!memcg) {
		rcu_read_unlock();
		__mod_node_page_state(pgdat, idx, val);
		return;
	}

	lruvec = mem_cgroup_lruvec(memcg, pgdat);
	__mod_lruvec_state(lruvec, idx, val);
	rcu_read_unlock();
}
EXPORT_SYMBOL(__lruvec_stat_mod_folio);

void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val)
{
	pg_data_t *pgdat = page_pgdat(virt_to_page(p));
	struct mem_cgroup *memcg;
	struct lruvec *lruvec;

	rcu_read_lock();
	memcg = mem_cgroup_from_slab_obj(p);

	/*
	 * Untracked pages have no memcg, no lruvec. Update only the
	 * node. If we reparent the slab objects to the root memcg,
	 * when we free the slab object, we need to update the per-memcg
	 * vmstats to keep it correct for the root memcg.
	 */
	if (!memcg) {
		__mod_node_page_state(pgdat, idx, val);
	} else {
		lruvec = mem_cgroup_lruvec(memcg, pgdat);
		__mod_lruvec_state(lruvec, idx, val);
	}
	rcu_read_unlock();
}

/**
 * __count_memcg_events - account VM events in a cgroup
 * @memcg: the memory cgroup
 * @idx: the event item
 * @count: the number of events that occurred
 */
void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
			  unsigned long count)
{
	int i = memcg_events_index(idx);

	if (mem_cgroup_disabled())
		return;

	if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx))
		return;

	memcg_stats_lock();
	__this_cpu_add(memcg->vmstats_percpu->events[i], count);
	memcg_rstat_updated(memcg, count);
	trace_count_memcg_events(memcg, idx, count);
	memcg_stats_unlock();
}

unsigned long memcg_events(struct mem_cgroup *memcg, int event)
{
	int i = memcg_events_index(event);

	if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, event))
		return 0;

	return READ_ONCE(memcg->vmstats->events[i]);
}

unsigned long memcg_events_local(struct mem_cgroup *memcg, int event)
{
	int i = memcg_events_index(event);

	if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, event))
		return 0;

	return READ_ONCE(memcg->vmstats->events_local[i]);
}

struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
{
	/*
	 * mm_update_next_owner() may clear mm->owner to NULL
	 * if it races with swapoff, page migration, etc.
	 * So this can be called with p == NULL.
	 */
	if (unlikely(!p))
		return NULL;

	return mem_cgroup_from_css(task_css(p, memory_cgrp_id));
}
EXPORT_SYMBOL(mem_cgroup_from_task);

static __always_inline struct mem_cgroup *active_memcg(void)
{
	if (!in_task())
		return this_cpu_read(int_active_memcg);
	else
		return current->active_memcg;
}

/**
 * get_mem_cgroup_from_mm: Obtain a reference on given mm_struct's memcg.
 * @mm: mm from which memcg should be extracted. It can be NULL.
 *
 * Obtain a reference on mm->memcg and return it if successful. If mm
 * is NULL, then the memcg is chosen as follows:
 * 1) The active memcg, if set.
 * 2) current->mm->memcg, if available.
 * 3) The root memcg.
 * If mem_cgroup is disabled, NULL is returned.
 */
struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return NULL;

	/*
	 * Page cache insertions can happen without an
	 * actual mm context, e.g. during disk probing
	 * on boot, loopback IO, acct() writes etc.
	 *
	 * No need to css_get on root memcg as the reference
	 * counting is disabled on the root level in the
	 * cgroup core. See CSS_NO_REF.
	 */
	if (unlikely(!mm)) {
		memcg = active_memcg();
		if (unlikely(memcg)) {
			/* remote memcg must hold a ref */
			css_get(&memcg->css);
			return memcg;
		}
		mm = current->mm;
		if (unlikely(!mm))
			return root_mem_cgroup;
	}

	rcu_read_lock();
	do {
		memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
		if (unlikely(!memcg))
			memcg = root_mem_cgroup;
	} while (!css_tryget(&memcg->css));
	rcu_read_unlock();
	return memcg;
}
EXPORT_SYMBOL(get_mem_cgroup_from_mm);
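
/*
 * Typical caller pattern (a sketch, not taken verbatim from any caller):
 * the returned reference must be dropped with css_put() once the memcg is
 * no longer needed. css_put() on the root memcg is a no-op (CSS_NO_REF),
 * so an unconditional put is safe:
 *
 *	struct mem_cgroup *memcg = get_mem_cgroup_from_mm(current->mm);
 *
 *	if (memcg) {
 *		... charge to or inspect memcg ...
 *		css_put(&memcg->css);
 *	}
 */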

/**
 * get_mem_cgroup_from_current - Obtain a reference on current task's memcg.
 */
struct mem_cgroup *get_mem_cgroup_from_current(void)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return NULL;

again:
	rcu_read_lock();
	memcg = mem_cgroup_from_task(current);
	if (!css_tryget(&memcg->css)) {
		rcu_read_unlock();
		goto again;
	}
	rcu_read_unlock();
	return memcg;
}

/**
 * get_mem_cgroup_from_folio - Obtain a reference on a given folio's memcg.
 * @folio: folio from which memcg should be extracted.
 */
struct mem_cgroup *get_mem_cgroup_from_folio(struct folio *folio)
{
	struct mem_cgroup *memcg = folio_memcg(folio);

	if (mem_cgroup_disabled())
		return NULL;

	rcu_read_lock();
	if (!memcg || WARN_ON_ONCE(!css_tryget(&memcg->css)))
		memcg = root_mem_cgroup;
	rcu_read_unlock();
	return memcg;
}

/**
 * mem_cgroup_iter - iterate over memory cgroup hierarchy
 * @root: hierarchy root
 * @prev: previously returned memcg, NULL on first invocation
 * @reclaim: cookie for shared reclaim walks, NULL for full walks
 *
 * Returns references to children of the hierarchy below @root, or
 * @root itself, or %NULL after a full round-trip.
 *
 * Caller must pass the return value in @prev on subsequent
 * invocations for reference counting, or use mem_cgroup_iter_break()
 * to cancel a hierarchy walk before the round-trip is complete.
 *
 * Reclaimers can specify a node in @reclaim to divide up the memcgs
 * in the hierarchy among all concurrent reclaimers operating on the
 * same node.
 */
struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
				   struct mem_cgroup *prev,
				   struct mem_cgroup_reclaim_cookie *reclaim)
{
	struct mem_cgroup_reclaim_iter *iter;
	struct cgroup_subsys_state *css;
	struct mem_cgroup *pos;
	struct mem_cgroup *next;

	if (mem_cgroup_disabled())
		return NULL;

	if (!root)
		root = root_mem_cgroup;

	rcu_read_lock();
restart:
	next = NULL;

	if (reclaim) {
		int gen;
		int nid = reclaim->pgdat->node_id;

		iter = &root->nodeinfo[nid]->iter;
		gen = atomic_read(&iter->generation);

		/*
		 * On start, join the current reclaim iteration cycle.
		 * Exit when a concurrent walker completes it.
		 */
		if (!prev)
			reclaim->generation = gen;
		else if (reclaim->generation != gen)
			goto out_unlock;

		pos = READ_ONCE(iter->position);
	} else
		pos = prev;

	css = pos ? &pos->css : NULL;

	while ((css = css_next_descendant_pre(css, &root->css))) {
		/*
		 * Verify the css and acquire a reference.  The root
		 * is provided by the caller, so we know it's alive
		 * and kicking, and don't take an extra reference.
		 */
		if (css == &root->css || css_tryget(css))
			break;
	}

	next = mem_cgroup_from_css(css);

	if (reclaim) {
		/*
		 * The position could have already been updated by a competing
		 * thread, so check that the value hasn't changed since we read
		 * it to avoid reclaiming from the same cgroup twice.
		 */
		if (cmpxchg(&iter->position, pos, next) != pos) {
			if (css && css != &root->css)
				css_put(css);
			goto restart;
		}

		if (!next) {
			atomic_inc(&iter->generation);

			/*
			 * Reclaimers share the hierarchy walk, and a
			 * new one might jump in right at the end of
			 * the hierarchy - make sure they see at least
			 * one group and restart from the beginning.
			 */
			if (!prev)
				goto restart;
		}
	}

out_unlock:
	rcu_read_unlock();
	if (prev && prev != root)
		css_put(&prev->css);

	return next;
}
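
/*
 * The canonical walk driven by mem_cgroup_iter() (a sketch of the pattern
 * used by reclaim, not a verbatim kernel excerpt):
 *
 *	struct mem_cgroup *memcg = mem_cgroup_iter(root, NULL, NULL);
 *
 *	while (memcg) {
 *		... scan memcg's lruvecs ...
 *		if (done) {
 *			mem_cgroup_iter_break(root, memcg);
 *			break;
 *		}
 *		memcg = mem_cgroup_iter(root, memcg, NULL);
 *	}
 *
 * Passing the previous return value back in keeps the css reference
 * counting balanced; bailing out early requires mem_cgroup_iter_break().
 */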

/**
 * mem_cgroup_iter_break - abort a hierarchy walk prematurely
 * @root: hierarchy root
 * @prev: last visited hierarchy member as returned by mem_cgroup_iter()
 */
void mem_cgroup_iter_break(struct mem_cgroup *root,
			   struct mem_cgroup *prev)
{
	if (!root)
		root = root_mem_cgroup;
	if (prev && prev != root)
		css_put(&prev->css);
}

static void __invalidate_reclaim_iterators(struct mem_cgroup *from,
					struct mem_cgroup *dead_memcg)
{
	struct mem_cgroup_reclaim_iter *iter;
	struct mem_cgroup_per_node *mz;
	int nid;

	for_each_node(nid) {
		mz = from->nodeinfo[nid];
		iter = &mz->iter;
		cmpxchg(&iter->position, dead_memcg, NULL);
	}
}

static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
{
	struct mem_cgroup *memcg = dead_memcg;
	struct mem_cgroup *last;

	do {
		__invalidate_reclaim_iterators(memcg, dead_memcg);
		last = memcg;
	} while ((memcg = parent_mem_cgroup(memcg)));

	/*
	 * When cgroup1 non-hierarchy mode is used,
	 * parent_mem_cgroup() does not walk all the way up to the
	 * cgroup root (root_mem_cgroup). So we have to handle
	 * dead_memcg from cgroup root separately.
	 */
	if (!mem_cgroup_is_root(last))
		__invalidate_reclaim_iterators(root_mem_cgroup,
						dead_memcg);
}

/**
 * mem_cgroup_scan_tasks - iterate over tasks of a memory cgroup hierarchy
 * @memcg: hierarchy root
 * @fn: function to call for each task
 * @arg: argument passed to @fn
 *
 * This function iterates over tasks attached to @memcg or to any of its
 * descendants and calls @fn for each task. If @fn returns a non-zero
 * value, the function breaks the iteration loop. Otherwise, it will iterate
 * over all tasks and return 0.
 *
 * This function must not be called for the root memory cgroup.
 */
void mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
			   int (*fn)(struct task_struct *, void *), void *arg)
{
	struct mem_cgroup *iter;
	int ret = 0;
	int i = 0;

	BUG_ON(mem_cgroup_is_root(memcg));

	for_each_mem_cgroup_tree(iter, memcg) {
		struct css_task_iter it;
		struct task_struct *task;

		css_task_iter_start(&iter->css, CSS_TASK_ITER_PROCS, &it);
		while (!ret && (task = css_task_iter_next(&it))) {
			/* Avoid potential softlockup warning */
			if ((++i & 1023) == 0)
				cond_resched();
			ret = fn(task, arg);
		}
		css_task_iter_end(&it);
		if (ret) {
			mem_cgroup_iter_break(memcg, iter);
			break;
		}
	}
}
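
/*
 * An illustrative callback shape (hypothetical helper, for exposition
 * only); the OOM killer uses this iterator with a callback that evaluates
 * each task and returns non-zero to stop the walk early:
 *
 *	static int count_task(struct task_struct *task, void *arg)
 *	{
 *		(*(unsigned long *)arg)++;
 *		return 0;	// keep iterating
 *	}
 *
 *	unsigned long nr = 0;
 *	mem_cgroup_scan_tasks(memcg, count_task, &nr);
 */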

#ifdef CONFIG_DEBUG_VM
void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return;

	memcg = folio_memcg(folio);

	if (!memcg)
		VM_BUG_ON_FOLIO(!mem_cgroup_is_root(lruvec_memcg(lruvec)), folio);
	else
		VM_BUG_ON_FOLIO(lruvec_memcg(lruvec) != memcg, folio);
}
#endif

/**
 * folio_lruvec_lock - Lock the lruvec for a folio.
 * @folio: Pointer to the folio.
 *
 * These functions are safe to use under any of the following conditions:
 * - folio locked
 * - folio_test_lru false
 * - folio frozen (refcount of 0)
 *
 * Return: The lruvec this folio is on with its lock held.
 */
struct lruvec *folio_lruvec_lock(struct folio *folio)
{
	struct lruvec *lruvec = folio_lruvec(folio);

	spin_lock(&lruvec->lru_lock);
	lruvec_memcg_debug(lruvec, folio);

	return lruvec;
}

/**
 * folio_lruvec_lock_irq - Lock the lruvec for a folio.
 * @folio: Pointer to the folio.
 *
 * These functions are safe to use under any of the following conditions:
 * - folio locked
 * - folio_test_lru false
 * - folio frozen (refcount of 0)
 *
 * Return: The lruvec this folio is on with its lock held and interrupts
 * disabled.
 */
struct lruvec *folio_lruvec_lock_irq(struct folio *folio)
{
	struct lruvec *lruvec = folio_lruvec(folio);

	spin_lock_irq(&lruvec->lru_lock);
	lruvec_memcg_debug(lruvec, folio);

	return lruvec;
}

/**
 * folio_lruvec_lock_irqsave - Lock the lruvec for a folio.
 * @folio: Pointer to the folio.
 * @flags: Pointer to irqsave flags.
 *
 * These functions are safe to use under any of the following conditions:
 * - folio locked
 * - folio_test_lru false
 * - folio frozen (refcount of 0)
 *
 * Return: The lruvec this folio is on with its lock held and interrupts
 * disabled.
 */
struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio,
		unsigned long *flags)
{
	struct lruvec *lruvec = folio_lruvec(folio);

	spin_lock_irqsave(&lruvec->lru_lock, *flags);
	lruvec_memcg_debug(lruvec, folio);

	return lruvec;
}
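
/*
 * A sketch of the pairing with the unlock helpers from
 * include/linux/memcontrol.h (illustrative, not a verbatim excerpt):
 *
 *	unsigned long flags;
 *	struct lruvec *lruvec = folio_lruvec_lock_irqsave(folio, &flags);
 *
 *	... manipulate the folio's LRU state ...
 *	unlock_page_lruvec_irqrestore(lruvec, flags);
 */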

/**
 * mem_cgroup_update_lru_size - account for adding or removing an lru page
 * @lruvec: mem_cgroup per zone lru vector
 * @lru: index of lru list the page is sitting on
 * @zid: zone id of the accounted pages
 * @nr_pages: positive when adding or negative when removing
 *
 * This function must be called under lru_lock, just before a page is added
 * to or just after a page is removed from an lru list.
 */
void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
				int zid, int nr_pages)
{
	struct mem_cgroup_per_node *mz;
	unsigned long *lru_size;
	long size;

	if (mem_cgroup_disabled())
		return;

	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	lru_size = &mz->lru_zone_size[zid][lru];

	if (nr_pages < 0)
		*lru_size += nr_pages;

	size = *lru_size;
	if (WARN_ONCE(size < 0,
		"%s(%p, %d, %d): lru_size %ld\n",
		__func__, lruvec, lru, nr_pages, size)) {
		VM_BUG_ON(1);
		*lru_size = 0;
	}

	if (nr_pages > 0)
		*lru_size += nr_pages;
}

/**
 * mem_cgroup_margin - calculate chargeable space of a memory cgroup
 * @memcg: the memory cgroup
 *
 * Returns the maximum amount of memory @memcg can be charged with, in
 * pages.
 */
static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
{
	unsigned long margin = 0;
	unsigned long count;
	unsigned long limit;

	count = page_counter_read(&memcg->memory);
	limit = READ_ONCE(memcg->memory.max);
	if (count < limit)
		margin = limit - count;

	if (do_memsw_account()) {
		count = page_counter_read(&memcg->memsw);
		limit = READ_ONCE(memcg->memsw.max);
		if (count < limit)
			margin = min(margin, limit - count);
		else
			margin = 0;
	}

	return margin;
}
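
/*
 * Worked example (illustrative numbers): with memory.max = 1000 pages and
 * usage = 900 pages, the memory margin is 100 pages. If memsw accounting
 * is active with memsw.max = 1050 and memsw usage = 1000, the memsw margin
 * is only 50, so the function returns min(100, 50) = 50 pages.
 */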

struct memory_stat {
	const char *name;
	unsigned int idx;
};

static const struct memory_stat memory_stats[] = {
	{ "anon",			NR_ANON_MAPPED			},
	{ "file",			NR_FILE_PAGES			},
	{ "kernel",			MEMCG_KMEM			},
	{ "kernel_stack",		NR_KERNEL_STACK_KB		},
	{ "pagetables",			NR_PAGETABLE			},
	{ "sec_pagetables",		NR_SECONDARY_PAGETABLE		},
	{ "percpu",			MEMCG_PERCPU_B			},
	{ "sock",			MEMCG_SOCK			},
	{ "vmalloc",			MEMCG_VMALLOC			},
	{ "shmem",			NR_SHMEM			},
#ifdef CONFIG_ZSWAP
	{ "zswap",			MEMCG_ZSWAP_B			},
	{ "zswapped",			MEMCG_ZSWAPPED			},
#endif
	{ "file_mapped",		NR_FILE_MAPPED			},
	{ "file_dirty",			NR_FILE_DIRTY			},
	{ "file_writeback",		NR_WRITEBACK			},
#ifdef CONFIG_SWAP
	{ "swapcached",			NR_SWAPCACHE			},
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	{ "anon_thp",			NR_ANON_THPS			},
	{ "file_thp",			NR_FILE_THPS			},
	{ "shmem_thp",			NR_SHMEM_THPS			},
#endif
	{ "inactive_anon",		NR_INACTIVE_ANON		},
	{ "active_anon",		NR_ACTIVE_ANON			},
	{ "inactive_file",		NR_INACTIVE_FILE		},
	{ "active_file",		NR_ACTIVE_FILE			},
	{ "unevictable",		NR_UNEVICTABLE			},
	{ "slab_reclaimable",		NR_SLAB_RECLAIMABLE_B		},
	{ "slab_unreclaimable",		NR_SLAB_UNRECLAIMABLE_B		},
#ifdef CONFIG_HUGETLB_PAGE
	{ "hugetlb",			NR_HUGETLB			},
#endif

	/* The memory events */
	{ "workingset_refault_anon",	WORKINGSET_REFAULT_ANON		},
	{ "workingset_refault_file",	WORKINGSET_REFAULT_FILE		},
	{ "workingset_activate_anon",	WORKINGSET_ACTIVATE_ANON	},
	{ "workingset_activate_file",	WORKINGSET_ACTIVATE_FILE	},
	{ "workingset_restore_anon",	WORKINGSET_RESTORE_ANON		},
	{ "workingset_restore_file",	WORKINGSET_RESTORE_FILE		},
	{ "workingset_nodereclaim",	WORKINGSET_NODERECLAIM		},

	{ "pgdemote_kswapd",		PGDEMOTE_KSWAPD		},
	{ "pgdemote_direct",		PGDEMOTE_DIRECT		},
	{ "pgdemote_khugepaged",	PGDEMOTE_KHUGEPAGED	},
#ifdef CONFIG_NUMA_BALANCING
	{ "pgpromote_success",		PGPROMOTE_SUCCESS	},
#endif
};

/* The actual unit of the state item, not the same as the output unit */
static int memcg_page_state_unit(int item)
{
	switch (item) {
	case MEMCG_PERCPU_B:
	case MEMCG_ZSWAP_B:
	case NR_SLAB_RECLAIMABLE_B:
	case NR_SLAB_UNRECLAIMABLE_B:
		return 1;
	case NR_KERNEL_STACK_KB:
		return SZ_1K;
	default:
		return PAGE_SIZE;
	}
}

/* Translate stat items to the correct unit for memory.stat output */
static int memcg_page_state_output_unit(int item)
{
	/*
	 * Workingset state is actually in pages, but we export it to userspace
	 * as a scalar count of events, so special case it here.
	 *
	 * Demotion and promotion activities are exported in pages, consistent
	 * with their global counterparts.
	 */
	switch (item) {
	case WORKINGSET_REFAULT_ANON:
	case WORKINGSET_REFAULT_FILE:
	case WORKINGSET_ACTIVATE_ANON:
	case WORKINGSET_ACTIVATE_FILE:
	case WORKINGSET_RESTORE_ANON:
	case WORKINGSET_RESTORE_FILE:
	case WORKINGSET_NODERECLAIM:
	case PGDEMOTE_KSWAPD:
	case PGDEMOTE_DIRECT:
	case PGDEMOTE_KHUGEPAGED:
#ifdef CONFIG_NUMA_BALANCING
	case PGPROMOTE_SUCCESS:
#endif
		return 1;
	default:
		return memcg_page_state_unit(item);
	}
}

unsigned long memcg_page_state_output(struct mem_cgroup *memcg, int item)
{
	return memcg_page_state(memcg, item) *
		memcg_page_state_output_unit(item);
}

unsigned long memcg_page_state_local_output(struct mem_cgroup *memcg, int item)
{
	return memcg_page_state_local(memcg, item) *
		memcg_page_state_output_unit(item);
}
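
/*
 * Worked example of the unit handling (illustrative values): kernel stacks
 * are tracked in KB, so for NR_KERNEL_STACK_KB the unit is SZ_1K and a
 * stored value of 16 is reported as 16384 bytes; MEMCG_ZSWAP_B is tracked
 * and reported in bytes (unit 1); regular page counters are multiplied by
 * PAGE_SIZE; workingset items are reported as raw event counts.
 */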

#ifdef CONFIG_HUGETLB_PAGE
static bool memcg_accounts_hugetlb(void)
{
	return cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_HUGETLB_ACCOUNTING;
}
#else /* CONFIG_HUGETLB_PAGE */
static bool memcg_accounts_hugetlb(void)
{
	return false;
}
#endif /* CONFIG_HUGETLB_PAGE */

static void memcg_stat_format(struct mem_cgroup *memcg, struct seq_buf *s)
{
	int i;

	/*
	 * Provide statistics on the state of the memory subsystem as
	 * well as cumulative event counters that show past behavior.
	 *
	 * This list is ordered following a combination of these gradients:
	 * 1) generic big picture -> specifics and details
	 * 2) reflecting userspace activity -> reflecting kernel heuristics
	 *
	 * Current memory state:
	 */
	mem_cgroup_flush_stats(memcg);

	for (i = 0; i < ARRAY_SIZE(memory_stats); i++) {
		u64 size;

#ifdef CONFIG_HUGETLB_PAGE
		if (unlikely(memory_stats[i].idx == NR_HUGETLB) &&
			!memcg_accounts_hugetlb())
			continue;
#endif
		size = memcg_page_state_output(memcg, memory_stats[i].idx);
		seq_buf_printf(s, "%s %llu\n", memory_stats[i].name, size);

		if (unlikely(memory_stats[i].idx == NR_SLAB_UNRECLAIMABLE_B)) {
			size += memcg_page_state_output(memcg,
							NR_SLAB_RECLAIMABLE_B);
			seq_buf_printf(s, "slab %llu\n", size);
		}
	}

	/* Accumulated memory events */
	seq_buf_printf(s, "pgscan %lu\n",
		       memcg_events(memcg, PGSCAN_KSWAPD) +
		       memcg_events(memcg, PGSCAN_DIRECT) +
		       memcg_events(memcg, PGSCAN_KHUGEPAGED));
	seq_buf_printf(s, "pgsteal %lu\n",
		       memcg_events(memcg, PGSTEAL_KSWAPD) +
		       memcg_events(memcg, PGSTEAL_DIRECT) +
		       memcg_events(memcg, PGSTEAL_KHUGEPAGED));

	for (i = 0; i < ARRAY_SIZE(memcg_vm_event_stat); i++) {
#ifdef CONFIG_MEMCG_V1
		if (memcg_vm_event_stat[i] == PGPGIN ||
		    memcg_vm_event_stat[i] == PGPGOUT)
			continue;
#endif
		seq_buf_printf(s, "%s %lu\n",
			       vm_event_name(memcg_vm_event_stat[i]),
			       memcg_events(memcg, memcg_vm_event_stat[i]));
	}
}

static void memory_stat_format(struct mem_cgroup *memcg, struct seq_buf *s)
{
	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
		memcg_stat_format(memcg, s);
	else
		memcg1_stat_format(memcg, s);
	if (seq_buf_has_overflowed(s))
		pr_warn("%s: Warning, stat buffer overflow, please report\n", __func__);
}

/**
 * mem_cgroup_print_oom_context: Print OOM information relevant to
 * memory controller.
 * @memcg: The memory cgroup that went over limit
 * @p: Task that is going to be killed
 *
 * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
 * enabled
 */
void mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
{
	rcu_read_lock();

	if (memcg) {
		pr_cont(",oom_memcg=");
		pr_cont_cgroup_path(memcg->css.cgroup);
	} else
		pr_cont(",global_oom");
	if (p) {
		pr_cont(",task_memcg=");
		pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
	}
	rcu_read_unlock();
}

/**
 * mem_cgroup_print_oom_meminfo: Print OOM memory information relevant to
 * memory controller.
 * @memcg: The memory cgroup that went over limit
 */
void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
{
	/* Use static buffer, for the caller is holding oom_lock. */
	static char buf[SEQ_BUF_SIZE];
	struct seq_buf s;

	lockdep_assert_held(&oom_lock);

	pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n",
		K((u64)page_counter_read(&memcg->memory)),
		K((u64)READ_ONCE(memcg->memory.max)), memcg->memory.failcnt);
	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
		pr_info("swap: usage %llukB, limit %llukB, failcnt %lu\n",
			K((u64)page_counter_read(&memcg->swap)),
			K((u64)READ_ONCE(memcg->swap.max)), memcg->swap.failcnt);
#ifdef CONFIG_MEMCG_V1
	else {
		pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %lu\n",
			K((u64)page_counter_read(&memcg->memsw)),
			K((u64)memcg->memsw.max), memcg->memsw.failcnt);
		pr_info("kmem: usage %llukB, limit %llukB, failcnt %lu\n",
			K((u64)page_counter_read(&memcg->kmem)),
			K((u64)memcg->kmem.max), memcg->kmem.failcnt);
	}
#endif

	pr_info("Memory cgroup stats for ");
	pr_cont_cgroup_path(memcg->css.cgroup);
	pr_cont(":");
	seq_buf_init(&s, buf, SEQ_BUF_SIZE);
	memory_stat_format(memcg, &s);
	seq_buf_do_printk(&s, KERN_INFO);
}

/*
 * Return the memory (and swap, if configured) limit for a memcg.
 */
unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
{
	unsigned long max = READ_ONCE(memcg->memory.max);

	if (do_memsw_account()) {
		if (mem_cgroup_swappiness(memcg)) {
			/* Calculate swap excess capacity from memsw limit */
			unsigned long swap = READ_ONCE(memcg->memsw.max) - max;

			max += min(swap, (unsigned long)total_swap_pages);
		}
	} else {
		if (mem_cgroup_swappiness(memcg))
			max += min(READ_ONCE(memcg->swap.max),
				   (unsigned long)total_swap_pages);
	}
	return max;
}
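
/*
 * Worked example (illustrative numbers, memsw semantics): with
 * memory.max = 1000 pages and memsw.max = 1500 pages, the swap headroom is
 * 500 pages; if total_swap_pages is only 300, the effective limit is
 * 1000 + min(500, 300) = 1300 pages. With swappiness 0, swap is ignored
 * and the limit is just memory.max.
 */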

unsigned long mem_cgroup_size(struct mem_cgroup *memcg)
{
	return page_counter_read(&memcg->memory);
}

static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
				     int order)
{
	struct oom_control oc = {
		.zonelist = NULL,
		.nodemask = NULL,
		.memcg = memcg,
		.gfp_mask = gfp_mask,
		.order = order,
	};
	bool ret = true;

	if (mutex_lock_killable(&oom_lock))
		return true;

	if (mem_cgroup_margin(memcg) >= (1 << order))
		goto unlock;

	/*
	 * A few threads which were not waiting at mutex_lock_killable() can
	 * fail to bail out. Therefore, check again after holding oom_lock.
	 */
	ret = task_is_dying() || out_of_memory(&oc);

unlock:
	mutex_unlock(&oom_lock);
	return ret;
}

/*
 * Returns true if successfully killed one or more processes. Though in some
 * corner cases it can return true even without killing any process.
 */
static bool mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
{
	bool locked, ret;

	if (order > PAGE_ALLOC_COSTLY_ORDER)
		return false;

	memcg_memory_event(memcg, MEMCG_OOM);

	if (!memcg1_oom_prepare(memcg, &locked))
		return false;

	ret = mem_cgroup_out_of_memory(memcg, mask, order);

	memcg1_oom_finish(memcg, locked);

	return ret;
}

/**
 * mem_cgroup_get_oom_group - get a memory cgroup to clean up after OOM
 * @victim: task to be killed by the OOM killer
 * @oom_domain: memcg in case of memcg OOM, NULL in case of system-wide OOM
 *
 * Returns a pointer to a memory cgroup, which has to be cleaned up
 * by killing all belonging OOM-killable tasks.
 *
 * Caller has to call mem_cgroup_put() on the returned non-NULL memcg.
 */
struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim,
					    struct mem_cgroup *oom_domain)
{
	struct mem_cgroup *oom_group = NULL;
	struct mem_cgroup *memcg;

	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
		return NULL;

	if (!oom_domain)
		oom_domain = root_mem_cgroup;

	rcu_read_lock();

	memcg = mem_cgroup_from_task(victim);
	if (mem_cgroup_is_root(memcg))
		goto out;

	/*
	 * If the victim task has been asynchronously moved to a different
	 * memory cgroup, we might end up killing tasks outside oom_domain.
	 * In this case it's better to ignore memory.group.oom.
	 */
	if (unlikely(!mem_cgroup_is_descendant(memcg, oom_domain)))
		goto out;

	/*
	 * Traverse the memory cgroup hierarchy from the victim task's
	 * cgroup up to the OOMing cgroup (or root) to find the
	 * highest-level memory cgroup with oom.group set.
	 */
	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
		if (READ_ONCE(memcg->oom_group))
			oom_group = memcg;

		if (memcg == oom_domain)
			break;
	}

	if (oom_group)
		css_get(&oom_group->css);
out:
	rcu_read_unlock();

	return oom_group;
}

void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
{
	pr_info("Tasks in ");
	pr_cont_cgroup_path(memcg->css.cgroup);
	pr_cont(" are going to be killed due to memory.oom.group set\n");
}
1740 
1741 struct memcg_stock_pcp {
1742 	local_lock_t stock_lock;
1743 	struct mem_cgroup *cached; /* must never be the root cgroup */
1744 	unsigned int nr_pages;
1745 
1746 	struct obj_cgroup *cached_objcg;
1747 	struct pglist_data *cached_pgdat;
1748 	unsigned int nr_bytes;
1749 	int nr_slab_reclaimable_b;
1750 	int nr_slab_unreclaimable_b;
1751 
1752 	struct work_struct work;
1753 	unsigned long flags;
1754 #define FLUSHING_CACHED_CHARGE	0
1755 };
1756 static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock) = {
1757 	.stock_lock = INIT_LOCAL_LOCK(stock_lock),
1758 };
1759 static DEFINE_MUTEX(percpu_charge_mutex);
1760 
1761 static struct obj_cgroup *drain_obj_stock(struct memcg_stock_pcp *stock);
1762 static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
1763 				     struct mem_cgroup *root_memcg);
1764 
1765 /**
1766  * consume_stock: Try to consume stocked charge on this cpu.
1767  * @memcg: memcg to consume from.
1768  * @nr_pages: how many pages to charge.
1769  *
1770  * The charges will only happen if @memcg matches the current cpu's memcg
1771  * stock, and at least @nr_pages are available in that stock.  A failure to
1772  * service an allocation from the stock will cause the slow path to refill it.
1773  *
1774  * Returns true if successful, false otherwise.
1775  */
1776 static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
1777 {
1778 	struct memcg_stock_pcp *stock;
1779 	unsigned int stock_pages;
1780 	unsigned long flags;
1781 	bool ret = false;
1782 
1783 	if (nr_pages > MEMCG_CHARGE_BATCH)
1784 		return ret;
1785 
1786 	local_lock_irqsave(&memcg_stock.stock_lock, flags);
1787 
1788 	stock = this_cpu_ptr(&memcg_stock);
1789 	stock_pages = READ_ONCE(stock->nr_pages);
1790 	if (memcg == READ_ONCE(stock->cached) && stock_pages >= nr_pages) {
1791 		WRITE_ONCE(stock->nr_pages, stock_pages - nr_pages);
1792 		ret = true;
1793 	}
1794 
1795 	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
1796 
1797 	return ret;
1798 }
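
/*
 * Editor's illustration (not kernel code): a single-CPU userland model of
 * the stock fast path above and the refill below. Real stocks are per-CPU
 * and IRQ-protected; this sketch only shows the caching arithmetic.
 */
#if 0
#include <stdbool.h>
#include <stdio.h>

#define CHARGE_BATCH 64U	/* stands in for MEMCG_CHARGE_BATCH */

struct stock { int cached_memcg; unsigned int nr_pages; };
static struct stock stock;

static bool consume(int memcg, unsigned int nr_pages)
{
	if (nr_pages > CHARGE_BATCH)
		return false;
	if (stock.cached_memcg == memcg && stock.nr_pages >= nr_pages) {
		stock.nr_pages -= nr_pages;	/* fast path: no counters touched */
		return true;
	}
	return false;
}

static void refill(int memcg, unsigned int nr_pages)
{
	if (stock.cached_memcg != memcg) {	/* reset if necessary */
		stock.nr_pages = 0;
		stock.cached_memcg = memcg;
	}
	stock.nr_pages += nr_pages;
}

int main(void)
{
	refill(1, CHARGE_BATCH);
	printf("hit=%d hit=%d miss=%d\n",
	       consume(1, 8), consume(1, 56), consume(1, 1));
	return 0;
}
#endif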
1799 
1800 /*
1801  * Return stocked charges to the page counters and reset the cached information.
1802  */
1803 static void drain_stock(struct memcg_stock_pcp *stock)
1804 {
1805 	unsigned int stock_pages = READ_ONCE(stock->nr_pages);
1806 	struct mem_cgroup *old = READ_ONCE(stock->cached);
1807 
1808 	if (!old)
1809 		return;
1810 
1811 	if (stock_pages) {
1812 		page_counter_uncharge(&old->memory, stock_pages);
1813 		if (do_memsw_account())
1814 			page_counter_uncharge(&old->memsw, stock_pages);
1815 
1816 		WRITE_ONCE(stock->nr_pages, 0);
1817 	}
1818 
1819 	css_put(&old->css);
1820 	WRITE_ONCE(stock->cached, NULL);
1821 }
1822 
1823 static void drain_local_stock(struct work_struct *dummy)
1824 {
1825 	struct memcg_stock_pcp *stock;
1826 	struct obj_cgroup *old = NULL;
1827 	unsigned long flags;
1828 
1829 	/*
1830 	 * The only protection from cpu hotplug (memcg_hotplug_cpu_dead) vs.
1831 	 * drain_stock races is that we always operate on local CPU stock
1832 	 * here with IRQs disabled.
1833 	 */
1834 	local_lock_irqsave(&memcg_stock.stock_lock, flags);
1835 
1836 	stock = this_cpu_ptr(&memcg_stock);
1837 	old = drain_obj_stock(stock);
1838 	drain_stock(stock);
1839 	clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
1840 
1841 	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
1842 	obj_cgroup_put(old);
1843 }
1844 
1845 /*
1846  * Cache charges (nr_pages) in the local per-cpu area.
1847  * They will be consumed by consume_stock() later.
1848  */
1849 static void __refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
1850 {
1851 	struct memcg_stock_pcp *stock;
1852 	unsigned int stock_pages;
1853 
1854 	stock = this_cpu_ptr(&memcg_stock);
1855 	if (READ_ONCE(stock->cached) != memcg) { /* reset if necessary */
1856 		drain_stock(stock);
1857 		css_get(&memcg->css);
1858 		WRITE_ONCE(stock->cached, memcg);
1859 	}
1860 	stock_pages = READ_ONCE(stock->nr_pages) + nr_pages;
1861 	WRITE_ONCE(stock->nr_pages, stock_pages);
1862 
1863 	if (stock_pages > MEMCG_CHARGE_BATCH)
1864 		drain_stock(stock);
1865 }
1866 
1867 static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
1868 {
1869 	unsigned long flags;
1870 
1871 	local_lock_irqsave(&memcg_stock.stock_lock, flags);
1872 	__refill_stock(memcg, nr_pages);
1873 	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
1874 }
1875 
1876 /*
1877  * Drain all per-CPU charge caches for the given root_memcg and the
1878  * subtree of the hierarchy under it.
1879  */
1880 void drain_all_stock(struct mem_cgroup *root_memcg)
1881 {
1882 	int cpu, curcpu;
1883 
1884 	/* If someone's already draining, avoid running more workers. */
1885 	if (!mutex_trylock(&percpu_charge_mutex))
1886 		return;
1887 	/*
1888 	 * Notify other cpus that a system-wide "drain" is running.
1889 	 * We do not care about races with cpu hotplug because cpu-down,
1890 	 * as well as workers from this path, always operate on the local
1891 	 * per-cpu data. CPU-up doesn't touch memcg_stock at all.
1892 	 */
1893 	migrate_disable();
1894 	curcpu = smp_processor_id();
1895 	for_each_online_cpu(cpu) {
1896 		struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
1897 		struct mem_cgroup *memcg;
1898 		bool flush = false;
1899 
1900 		rcu_read_lock();
1901 		memcg = READ_ONCE(stock->cached);
1902 		if (memcg && READ_ONCE(stock->nr_pages) &&
1903 		    mem_cgroup_is_descendant(memcg, root_memcg))
1904 			flush = true;
1905 		else if (obj_stock_flush_required(stock, root_memcg))
1906 			flush = true;
1907 		rcu_read_unlock();
1908 
1909 		if (flush &&
1910 		    !test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
1911 			if (cpu == curcpu)
1912 				drain_local_stock(&stock->work);
1913 			else if (!cpu_is_isolated(cpu))
1914 				schedule_work_on(cpu, &stock->work);
1915 		}
1916 	}
1917 	migrate_enable();
1918 	mutex_unlock(&percpu_charge_mutex);
1919 }
1920 
1921 static int memcg_hotplug_cpu_dead(unsigned int cpu)
1922 {
1923 	struct memcg_stock_pcp *stock;
1924 
1925 	stock = &per_cpu(memcg_stock, cpu);
1926 	drain_stock(stock);
1927 
1928 	return 0;
1929 }
1930 
1931 static unsigned long reclaim_high(struct mem_cgroup *memcg,
1932 				  unsigned int nr_pages,
1933 				  gfp_t gfp_mask)
1934 {
1935 	unsigned long nr_reclaimed = 0;
1936 
1937 	do {
1938 		unsigned long pflags;
1939 
1940 		if (page_counter_read(&memcg->memory) <=
1941 		    READ_ONCE(memcg->memory.high))
1942 			continue;
1943 
1944 		memcg_memory_event(memcg, MEMCG_HIGH);
1945 
1946 		psi_memstall_enter(&pflags);
1947 		nr_reclaimed += try_to_free_mem_cgroup_pages(memcg, nr_pages,
1948 							gfp_mask,
1949 							MEMCG_RECLAIM_MAY_SWAP,
1950 							NULL);
1951 		psi_memstall_leave(&pflags);
1952 	} while ((memcg = parent_mem_cgroup(memcg)) &&
1953 		 !mem_cgroup_is_root(memcg));
1954 
1955 	return nr_reclaimed;
1956 }
1957 
1958 static void high_work_func(struct work_struct *work)
1959 {
1960 	struct mem_cgroup *memcg;
1961 
1962 	memcg = container_of(work, struct mem_cgroup, high_work);
1963 	reclaim_high(memcg, MEMCG_CHARGE_BATCH, GFP_KERNEL);
1964 }
1965 
1966 /*
1967  * Clamp the maximum sleep time per allocation batch to 2 seconds. This is
1968  * long enough to cause a significant slowdown in most cases, while still
1969  * allowing diagnostics and tracing to proceed without becoming stuck.
1970  */
1971 #define MEMCG_MAX_HIGH_DELAY_JIFFIES (2UL*HZ)
1972 
1973 /*
1974  * When calculating the delay, we use these on either side of the exponentiation
1975  * to maintain precision and scale to a reasonable number of jiffies (see the
1976  * table below).
1977  *
1978  * - MEMCG_DELAY_PRECISION_SHIFT: Extra precision bits while translating the
1979  *   overage ratio to a delay.
1980  * - MEMCG_DELAY_SCALING_SHIFT: The number of bits to scale down the
1981  *   proposed penalty in order to reduce to a reasonable number of jiffies, and
1982  *   to produce a reasonable delay curve.
1983  *
1984  * MEMCG_DELAY_SCALING_SHIFT just happens to be a number that produces a
1985  * reasonable delay curve compared to precision-adjusted overage, not
1986  * penalising heavily at first, but still making sure that growth beyond the
1987  * limit penalises misbehaviour cgroups by slowing them down exponentially. For
1988  * example, with a high of 100 megabytes:
1989  *
1990  *  +-------+------------------------+
1991  *  | usage | time to allocate in ms |
1992  *  +-------+------------------------+
1993  *  | 100M  |                      0 |
1994  *  | 101M  |                      6 |
1995  *  | 102M  |                     25 |
1996  *  | 103M  |                     57 |
1997  *  | 104M  |                    102 |
1998  *  | 105M  |                    159 |
1999  *  | 106M  |                    230 |
2000  *  | 107M  |                    313 |
2001  *  | 108M  |                    409 |
2002  *  | 109M  |                    518 |
2003  *  | 110M  |                    639 |
2004  *  | 111M  |                    774 |
2005  *  | 112M  |                    921 |
2006  *  | 113M  |                   1081 |
2007  *  | 114M  |                   1254 |
2008  *  | 115M  |                   1439 |
2009  *  | 116M  |                   1638 |
2010  *  | 117M  |                   1849 |
2011  *  | 118M  |                   2000 |
2012  *  | 119M  |                   2000 |
2013  *  | 120M  |                   2000 |
2014  *  +-------+------------------------+
2015  */
2016 #define MEMCG_DELAY_PRECISION_SHIFT 20
2017 #define MEMCG_DELAY_SCALING_SHIFT 14
2018 
2019 static u64 calculate_overage(unsigned long usage, unsigned long high)
2020 {
2021 	u64 overage;
2022 
2023 	if (usage <= high)
2024 		return 0;
2025 
2026 	/*
2027 	 * Prevent division by 0 in overage calculation by acting as if
2028 	 * it was a threshold of 1 page
2029 	 * it were a threshold of 1 page.
2030 	high = max(high, 1UL);
2031 
2032 	overage = usage - high;
2033 	overage <<= MEMCG_DELAY_PRECISION_SHIFT;
2034 	return div64_u64(overage, high);
2035 }
2036 
2037 static u64 mem_find_max_overage(struct mem_cgroup *memcg)
2038 {
2039 	u64 overage, max_overage = 0;
2040 
2041 	do {
2042 		overage = calculate_overage(page_counter_read(&memcg->memory),
2043 					    READ_ONCE(memcg->memory.high));
2044 		max_overage = max(overage, max_overage);
2045 	} while ((memcg = parent_mem_cgroup(memcg)) &&
2046 		 !mem_cgroup_is_root(memcg));
2047 
2048 	return max_overage;
2049 }
2050 
2051 static u64 swap_find_max_overage(struct mem_cgroup *memcg)
2052 {
2053 	u64 overage, max_overage = 0;
2054 
2055 	do {
2056 		overage = calculate_overage(page_counter_read(&memcg->swap),
2057 					    READ_ONCE(memcg->swap.high));
2058 		if (overage)
2059 			memcg_memory_event(memcg, MEMCG_SWAP_HIGH);
2060 		max_overage = max(overage, max_overage);
2061 	} while ((memcg = parent_mem_cgroup(memcg)) &&
2062 		 !mem_cgroup_is_root(memcg));
2063 
2064 	return max_overage;
2065 }
2066 
2067 /*
2068  * Get the number of jiffies by which we should penalise a mischievous cgroup
2069  * that is exceeding its memory.high, checking both it and its ancestors.
2070  */
2071 static unsigned long calculate_high_delay(struct mem_cgroup *memcg,
2072 					  unsigned int nr_pages,
2073 					  u64 max_overage)
2074 {
2075 	unsigned long penalty_jiffies;
2076 
2077 	if (!max_overage)
2078 		return 0;
2079 
2080 	/*
2081 	 * We use overage compared to memory.high to calculate the number of
2082 	 * jiffies to sleep (penalty_jiffies). Ideally this value should be
2083 	 * fairly lenient on small overages, and increasingly harsh when the
2084 	 * memcg in question makes it clear that it has no intention of stopping
2085 	 * its crazy behaviour, so we exponentially increase the delay based on
2086 	 * overage amount.
2087 	 */
2088 	penalty_jiffies = max_overage * max_overage * HZ;
2089 	penalty_jiffies >>= MEMCG_DELAY_PRECISION_SHIFT;
2090 	penalty_jiffies >>= MEMCG_DELAY_SCALING_SHIFT;
2091 
2092 	/*
2093 	 * Factor in the task's own contribution to the overage, such that four
2094 	 * N-sized allocations are throttled approximately the same as one
2095 	 * 4N-sized allocation.
2096 	 *
2097 	 * MEMCG_CHARGE_BATCH pages is nominal, so work out how much smaller or
2098 	 * larger the current charge batch is than that.
2099 	 */
2100 	return penalty_jiffies * nr_pages / MEMCG_CHARGE_BATCH;
2101 }
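
/*
 * Editor's illustration (not kernel code): a userland program reproducing
 * the delay table above from the two shifts. The ms column is essentially
 * independent of HZ (the jiffies-to-ms conversion cancels it); HZ=1000 is
 * only assumed for the intermediate jiffies value. The table corresponds
 * to a full MEMCG_CHARGE_BATCH worth of pages, i.e. an nr_pages scaling
 * factor of 1.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

#define PRECISION_SHIFT	20
#define SCALING_SHIFT	14
#define HZ		1000ULL
#define MAX_DELAY	(2 * HZ)	/* MEMCG_MAX_HIGH_DELAY_JIFFIES */

int main(void)
{
	uint64_t high = 100ULL << 20;	/* a 100M memory.high, in bytes */
	int mb;

	for (mb = 100; mb <= 120; mb++) {
		uint64_t usage = (uint64_t)mb << 20;
		uint64_t overage = usage <= high ? 0 :
			((usage - high) << PRECISION_SHIFT) / high;
		uint64_t jiffies = (overage * overage * HZ) >>
			(PRECISION_SHIFT + SCALING_SHIFT);

		if (jiffies > MAX_DELAY)
			jiffies = MAX_DELAY;
		printf("| %dM | %4llu |\n", mb,
		       (unsigned long long)(jiffies * 1000 / HZ));
	}
	return 0;
}
#endif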
2102 
2103 /*
2104  * Reclaims memory over the high limit. Called directly from
2105  * try_charge() (context permitting), as well as from the userland
2106  * return path where reclaim is always able to block.
2107  */
2108 void mem_cgroup_handle_over_high(gfp_t gfp_mask)
2109 {
2110 	unsigned long penalty_jiffies;
2111 	unsigned long pflags;
2112 	unsigned long nr_reclaimed;
2113 	unsigned int nr_pages = current->memcg_nr_pages_over_high;
2114 	int nr_retries = MAX_RECLAIM_RETRIES;
2115 	struct mem_cgroup *memcg;
2116 	bool in_retry = false;
2117 
2118 	if (likely(!nr_pages))
2119 		return;
2120 
2121 	memcg = get_mem_cgroup_from_mm(current->mm);
2122 	current->memcg_nr_pages_over_high = 0;
2123 
2124 retry_reclaim:
2125 	/*
2126 	 * Bail if the task is already exiting. Unlike memory.max,
2127 	 * memory.high enforcement isn't as strict, and there is no
2128 	 * OOM killer involved, which means the excess could already
2129 	 * be much bigger (and still growing) than it could for
2130 	 * memory.max; the dying task could get stuck in fruitless
2131 	 * reclaim for a long time, which isn't desirable.
2132 	 */
2133 	if (task_is_dying())
2134 		goto out;
2135 
2136 	/*
2137 	 * The allocating task should reclaim at least the batch size, but for
2138 	 * subsequent retries we only want to do what's necessary to prevent oom
2139 	 * or breaching resource isolation.
2140 	 *
2141 	 * This is distinct from memory.max or page allocator behaviour because
2142 	 * memory.high is currently batched, whereas memory.max and the page
2143 	 * allocator run every time an allocation is made.
2144 	 */
2145 	nr_reclaimed = reclaim_high(memcg,
2146 				    in_retry ? SWAP_CLUSTER_MAX : nr_pages,
2147 				    gfp_mask);
2148 
2149 	/*
2150 	 * memory.high is breached and reclaim is unable to keep up. Throttle
2151 	 * allocators proactively to slow down excessive growth.
2152 	 */
2153 	penalty_jiffies = calculate_high_delay(memcg, nr_pages,
2154 					       mem_find_max_overage(memcg));
2155 
2156 	penalty_jiffies += calculate_high_delay(memcg, nr_pages,
2157 						swap_find_max_overage(memcg));
2158 
2159 	/*
2160 	 * Clamp the max delay per usermode return so as to still keep the
2161 	 * application moving forwards and also permit diagnostics, albeit
2162 	 * extremely slowly.
2163 	 */
2164 	penalty_jiffies = min(penalty_jiffies, MEMCG_MAX_HIGH_DELAY_JIFFIES);
2165 
2166 	/*
2167 	 * Don't sleep if the amount of jiffies this memcg owes us is so low
2168 	 * that it's not even worth doing, in an attempt to be nice to those who
2169 	 * go only a small amount over their memory.high value and maybe haven't
2170 	 * been aggressively reclaimed enough yet.
2171 	 */
2172 	if (penalty_jiffies <= HZ / 100)
2173 		goto out;
2174 
2175 	/*
2176 	 * If reclaim is making forward progress but we're still over
2177 	 * memory.high, we want to encourage that rather than doing allocator
2178 	 * throttling.
2179 	 */
2180 	if (nr_reclaimed || nr_retries--) {
2181 		in_retry = true;
2182 		goto retry_reclaim;
2183 	}
2184 
2185 	/*
2186 	 * Reclaim didn't manage to push usage below the limit, slow
2187 	 * this allocating task down.
2188 	 *
2189 	 * If we exit early, we're guaranteed to die (since
2190 	 * schedule_timeout_killable sets TASK_KILLABLE). This means we don't
2191 	 * need to account for any ill-begotten jiffies to pay them off later.
2192 	 */
2193 	psi_memstall_enter(&pflags);
2194 	schedule_timeout_killable(penalty_jiffies);
2195 	psi_memstall_leave(&pflags);
2196 
2197 out:
2198 	css_put(&memcg->css);
2199 }
2200 
2201 int try_charge_memcg(struct mem_cgroup *memcg, gfp_t gfp_mask,
2202 		     unsigned int nr_pages)
2203 {
2204 	unsigned int batch = max(MEMCG_CHARGE_BATCH, nr_pages);
2205 	int nr_retries = MAX_RECLAIM_RETRIES;
2206 	struct mem_cgroup *mem_over_limit;
2207 	struct page_counter *counter;
2208 	unsigned long nr_reclaimed;
2209 	bool passed_oom = false;
2210 	unsigned int reclaim_options = MEMCG_RECLAIM_MAY_SWAP;
2211 	bool drained = false;
2212 	bool raised_max_event = false;
2213 	unsigned long pflags;
2214 
2215 retry:
2216 	if (consume_stock(memcg, nr_pages))
2217 		return 0;
2218 
2219 	if (!do_memsw_account() ||
2220 	    page_counter_try_charge(&memcg->memsw, batch, &counter)) {
2221 		if (page_counter_try_charge(&memcg->memory, batch, &counter))
2222 			goto done_restock;
2223 		if (do_memsw_account())
2224 			page_counter_uncharge(&memcg->memsw, batch);
2225 		mem_over_limit = mem_cgroup_from_counter(counter, memory);
2226 	} else {
2227 		mem_over_limit = mem_cgroup_from_counter(counter, memsw);
2228 		reclaim_options &= ~MEMCG_RECLAIM_MAY_SWAP;
2229 	}
2230 
2231 	if (batch > nr_pages) {
2232 		batch = nr_pages;
2233 		goto retry;
2234 	}
2235 
2236 	/*
2237 	 * Prevent unbounded recursion when reclaim operations need to
2238 	 * allocate memory. This might exceed the limits temporarily,
2239 	 * but we prefer facilitating memory reclaim and getting back
2240 	 * under the limit over triggering OOM kills in these cases.
2241 	 */
2242 	if (unlikely(current->flags & PF_MEMALLOC))
2243 		goto force;
2244 
2245 	if (unlikely(task_in_memcg_oom(current)))
2246 		goto nomem;
2247 
2248 	if (!gfpflags_allow_blocking(gfp_mask))
2249 		goto nomem;
2250 
2251 	memcg_memory_event(mem_over_limit, MEMCG_MAX);
2252 	raised_max_event = true;
2253 
2254 	psi_memstall_enter(&pflags);
2255 	nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages,
2256 						    gfp_mask, reclaim_options, NULL);
2257 	psi_memstall_leave(&pflags);
2258 
2259 	if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
2260 		goto retry;
2261 
2262 	if (!drained) {
2263 		drain_all_stock(mem_over_limit);
2264 		drained = true;
2265 		goto retry;
2266 	}
2267 
2268 	if (gfp_mask & __GFP_NORETRY)
2269 		goto nomem;
2270 	/*
2271 	 * Even though the limit is exceeded at this point, reclaim
2272 	 * may have been able to free some pages.  Retry the charge
2273 	 * before killing the task.
2274 	 *
2275 	 * Only for regular pages, though: huge pages are rather
2276 	 * unlikely to succeed so close to the limit, and we fall back
2277 	 * to regular pages anyway in case of failure.
2278 	 */
2279 	if (nr_reclaimed && nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER))
2280 		goto retry;
2281 
2282 	if (nr_retries--)
2283 		goto retry;
2284 
2285 	if (gfp_mask & __GFP_RETRY_MAYFAIL)
2286 		goto nomem;
2287 
2288 	/* Avoid endless loop for tasks bypassed by the oom killer */
2289 	if (passed_oom && task_is_dying())
2290 		goto nomem;
2291 
2292 	/*
2293 	 * Keep retrying as long as the memcg oom killer is able to make
2294 	 * forward progress, or bypass the charge if the oom killer
2295 	 * couldn't make any progress.
2296 	 */
2297 	if (mem_cgroup_oom(mem_over_limit, gfp_mask,
2298 			   get_order(nr_pages * PAGE_SIZE))) {
2299 		passed_oom = true;
2300 		nr_retries = MAX_RECLAIM_RETRIES;
2301 		goto retry;
2302 	}
2303 nomem:
2304 	/*
2305 	 * Memcg doesn't have a dedicated reserve for atomic
2306 	 * allocations. But like the global atomic pool, we need to
2307 	 * put the burden of reclaim on regular allocation requests
2308 	 * and let these go through as privileged allocations.
2309 	 */
2310 	if (!(gfp_mask & (__GFP_NOFAIL | __GFP_HIGH)))
2311 		return -ENOMEM;
2312 force:
2313 	/*
2314 	 * If the allocation has to be enforced, don't forget to raise
2315 	 * a MEMCG_MAX event.
2316 	 */
2317 	if (!raised_max_event)
2318 		memcg_memory_event(mem_over_limit, MEMCG_MAX);
2319 
2320 	/*
2321 	 * The allocation either can't fail or will lead to more memory
2322 	 * being freed very soon.  Allow memory usage to go over the limit
2323 	 * temporarily by force charging it.
2324 	 */
2325 	page_counter_charge(&memcg->memory, nr_pages);
2326 	if (do_memsw_account())
2327 		page_counter_charge(&memcg->memsw, nr_pages);
2328 
2329 	return 0;
2330 
2331 done_restock:
2332 	if (batch > nr_pages)
2333 		refill_stock(memcg, batch - nr_pages);
2334 
2335 	/*
2336 	 * If the hierarchy is above the normal consumption range, schedule
2337 	 * reclaim on returning to userland.  We can perform reclaim here
2338 	 * if __GFP_RECLAIM but let's always punt for simplicity and so that
2339 	 * GFP_KERNEL can consistently be used during reclaim.  @memcg is
2340 	 * not recorded as it most likely matches current's and won't
2341 	 * change in the meantime.  As high limit is checked again before
2342 	 * reclaim, the cost of mismatch is negligible.
2343 	 */
2344 	do {
2345 		bool mem_high, swap_high;
2346 
2347 		mem_high = page_counter_read(&memcg->memory) >
2348 			READ_ONCE(memcg->memory.high);
2349 		swap_high = page_counter_read(&memcg->swap) >
2350 			READ_ONCE(memcg->swap.high);
2351 
2352 		/* Don't bother a random interrupted task */
2353 		if (!in_task()) {
2354 			if (mem_high) {
2355 				schedule_work(&memcg->high_work);
2356 				break;
2357 			}
2358 			continue;
2359 		}
2360 
2361 		if (mem_high || swap_high) {
2362 			/*
2363 			 * The allocating tasks in this cgroup will need to do
2364 			 * reclaim or be throttled to prevent further growth
2365 			 * of the memory or swap footprints.
2366 			 *
2367 			 * Target some best-effort fairness between the tasks,
2368 			 * and distribute reclaim work and delay penalties
2369 			 * based on how much each task is actually allocating.
2370 			 */
2371 			current->memcg_nr_pages_over_high += batch;
2372 			set_notify_resume(current);
2373 			break;
2374 		}
2375 	} while ((memcg = parent_mem_cgroup(memcg)));
2376 
2377 	/*
2378 	 * Reclaim is set up above to be called from the userland
2379 	 * return path. But also attempt synchronous reclaim to avoid
2380 	 * excessive overrun while the task is still inside the
2381 	 * kernel. If this is successful, the return path will see it
2382 	 * when it rechecks the overage and simply bail out.
2383 	 */
2384 	if (current->memcg_nr_pages_over_high > MEMCG_CHARGE_BATCH &&
2385 	    !(current->flags & PF_MEMALLOC) &&
2386 	    gfpflags_allow_blocking(gfp_mask))
2387 		mem_cgroup_handle_over_high(gfp_mask);
2388 	return 0;
2389 }
2390 
2391 static void commit_charge(struct folio *folio, struct mem_cgroup *memcg)
2392 {
2393 	VM_BUG_ON_FOLIO(folio_memcg_charged(folio), folio);
2394 	/*
2395 	 * Any of the following ensures page's memcg stability:
2396 	 *
2397 	 * - the page lock
2398 	 * - LRU isolation
2399 	 * - exclusive reference
2400 	 */
2401 	folio->memcg_data = (unsigned long)memcg;
2402 }
2403 
2404 static inline void __mod_objcg_mlstate(struct obj_cgroup *objcg,
2405 				       struct pglist_data *pgdat,
2406 				       enum node_stat_item idx, int nr)
2407 {
2408 	struct mem_cgroup *memcg;
2409 	struct lruvec *lruvec;
2410 
2411 	rcu_read_lock();
2412 	memcg = obj_cgroup_memcg(objcg);
2413 	lruvec = mem_cgroup_lruvec(memcg, pgdat);
2414 	__mod_memcg_lruvec_state(lruvec, idx, nr);
2415 	rcu_read_unlock();
2416 }
2417 
2418 static __always_inline
2419 struct mem_cgroup *mem_cgroup_from_obj_folio(struct folio *folio, void *p)
2420 {
2421 	/*
2422 	 * Slab objects are accounted individually, not per-page.
2423 	 * Memcg membership data for each individual object is saved in
2424 	 * slab->obj_exts.
2425 	 */
2426 	if (folio_test_slab(folio)) {
2427 		struct slabobj_ext *obj_exts;
2428 		struct slab *slab;
2429 		unsigned int off;
2430 
2431 		slab = folio_slab(folio);
2432 		obj_exts = slab_obj_exts(slab);
2433 		if (!obj_exts)
2434 			return NULL;
2435 
2436 		off = obj_to_index(slab->slab_cache, slab, p);
2437 		if (obj_exts[off].objcg)
2438 			return obj_cgroup_memcg(obj_exts[off].objcg);
2439 
2440 		return NULL;
2441 	}
2442 
2443 	/*
2444 	 * folio_memcg_check() is used here, because in theory we can encounter
2445 	 * a folio where the slab flag has been cleared already, but
2446 	 * slab->obj_exts has not been freed yet.
2447 	 * folio_memcg_check() will guarantee that a proper memory
2448 	 * cgroup pointer or NULL will be returned.
2449 	 */
2450 	return folio_memcg_check(folio);
2451 }
2452 
2453 /*
2454  * Returns a pointer to the memory cgroup to which the kernel object is charged.
2455  * It is not suitable for objects allocated using vmalloc().
2456  *
2457  * The passed kernel object must be a slab object or a generic kernel page.
2458  *
2459  * The caller must ensure the memcg lifetime, e.g. by taking rcu_read_lock(),
2460  * cgroup_mutex, etc.
2461  */
2462 struct mem_cgroup *mem_cgroup_from_slab_obj(void *p)
2463 {
2464 	if (mem_cgroup_disabled())
2465 		return NULL;
2466 
2467 	return mem_cgroup_from_obj_folio(virt_to_folio(p), p);
2468 }
2469 
2470 static struct obj_cgroup *__get_obj_cgroup_from_memcg(struct mem_cgroup *memcg)
2471 {
2472 	struct obj_cgroup *objcg = NULL;
2473 
2474 	for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg)) {
2475 		objcg = rcu_dereference(memcg->objcg);
2476 		if (likely(objcg && obj_cgroup_tryget(objcg)))
2477 			break;
2478 		objcg = NULL;
2479 	}
2480 	return objcg;
2481 }
2482 
2483 static struct obj_cgroup *current_objcg_update(void)
2484 {
2485 	struct mem_cgroup *memcg;
2486 	struct obj_cgroup *old, *objcg = NULL;
2487 
2488 	do {
2489 		/* Atomically drop the update bit. */
2490 		old = xchg(&current->objcg, NULL);
2491 		if (old) {
2492 			old = (struct obj_cgroup *)
2493 				((unsigned long)old & ~CURRENT_OBJCG_UPDATE_FLAG);
2494 			obj_cgroup_put(old);
2495 
2496 			old = NULL;
2497 		}
2498 
2499 		/* If new objcg is NULL, no reason for the second atomic update. */
2500 		if (!current->mm || (current->flags & PF_KTHREAD))
2501 			return NULL;
2502 
2503 		/*
2504 		 * Release the objcg pointer from the previous iteration,
2505 		 * if try_cmpxchg() below fails.
2506 		 */
2507 		if (unlikely(objcg)) {
2508 			obj_cgroup_put(objcg);
2509 			objcg = NULL;
2510 		}
2511 
2512 		/*
2513 		 * Obtain the new objcg pointer. The current task can be
2514 		 * asynchronously moved to another memcg and the previous
2515 		 * memcg can be offlined. So let's get the memcg pointer
2516 		 * and try get a reference to objcg under a rcu read lock.
2517 		 */
2518 
2519 		rcu_read_lock();
2520 		memcg = mem_cgroup_from_task(current);
2521 		objcg = __get_obj_cgroup_from_memcg(memcg);
2522 		rcu_read_unlock();
2523 
2524 		/*
2525 		 * Try to set up a new objcg pointer atomically. If it
2526 		 * fails, it means the update flag was set concurrently, so
2527 		 * the whole procedure should be repeated.
2528 		 */
2529 	} while (!try_cmpxchg(&current->objcg, &old, objcg));
2530 
2531 	return objcg;
2532 }
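
/*
 * Editor's illustration (not kernel code): a userland C11 model of the
 * protocol above. The low pointer bit stands in for
 * CURRENT_OBJCG_UPDATE_FLAG: a migration path tags the slot, and the
 * owner clears the tag with an exchange, recomputes, and republishes with
 * a compare-exchange that fails (and retries) if the slot was re-tagged
 * concurrently. Reference counting is omitted for brevity.
 */
#if 0
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define UPDATE_FLAG 1UL

static _Atomic uintptr_t slot;		/* models current->objcg */

static void mark_for_update(void)	/* models task migration */
{
	atomic_fetch_or(&slot, UPDATE_FLAG);
}

static uintptr_t slot_update(uintptr_t (*recompute)(void))
{
	uintptr_t expected, fresh;

	do {
		(void)atomic_exchange(&slot, 0); /* atomically drop the flag */
		fresh = recompute();
		expected = 0;
		/* Publish; retry if the slot was re-tagged meanwhile. */
	} while (!atomic_compare_exchange_strong(&slot, &expected, fresh));

	return fresh;
}

static uintptr_t lookup(void) { return 0x1000; }	/* stand-in lookup */

int main(void)
{
	mark_for_update();
	printf("published %#lx\n", (unsigned long)slot_update(lookup));
	return 0;
}
#endif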
2533 
2534 __always_inline struct obj_cgroup *current_obj_cgroup(void)
2535 {
2536 	struct mem_cgroup *memcg;
2537 	struct obj_cgroup *objcg;
2538 
2539 	if (in_task()) {
2540 		memcg = current->active_memcg;
2541 		if (unlikely(memcg))
2542 			goto from_memcg;
2543 
2544 		objcg = READ_ONCE(current->objcg);
2545 		if (unlikely((unsigned long)objcg & CURRENT_OBJCG_UPDATE_FLAG))
2546 			objcg = current_objcg_update();
2547 		/*
2548 		 * Objcg reference is kept by the task, so it's safe
2549 		 * to use the objcg by the current task.
2550 		 */
2551 		return objcg;
2552 	}
2553 
2554 	memcg = this_cpu_read(int_active_memcg);
2555 	if (unlikely(memcg))
2556 		goto from_memcg;
2557 
2558 	return NULL;
2559 
2560 from_memcg:
2561 	objcg = NULL;
2562 	for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg)) {
2563 		/*
2564 		 * Memcg pointer is protected by scope (see set_active_memcg())
2565 		 * and is pinning the corresponding objcg, so objcg can't go
2566 		 * away and can be used within the scope without any additional
2567 		 * protection.
2568 		 */
2569 		objcg = rcu_dereference_check(memcg->objcg, 1);
2570 		if (likely(objcg))
2571 			break;
2572 	}
2573 
2574 	return objcg;
2575 }
2576 
2577 struct obj_cgroup *get_obj_cgroup_from_folio(struct folio *folio)
2578 {
2579 	struct obj_cgroup *objcg;
2580 
2581 	if (!memcg_kmem_online())
2582 		return NULL;
2583 
2584 	if (folio_memcg_kmem(folio)) {
2585 		objcg = __folio_objcg(folio);
2586 		obj_cgroup_get(objcg);
2587 	} else {
2588 		struct mem_cgroup *memcg;
2589 
2590 		rcu_read_lock();
2591 		memcg = __folio_memcg(folio);
2592 		if (memcg)
2593 			objcg = __get_obj_cgroup_from_memcg(memcg);
2594 		else
2595 			objcg = NULL;
2596 		rcu_read_unlock();
2597 	}
2598 	return objcg;
2599 }
2600 
2601 /*
2602  * obj_cgroup_uncharge_pages: uncharge a number of kernel pages from an objcg
2603  * @objcg: object cgroup to uncharge
2604  * @nr_pages: number of pages to uncharge
2605  */
2606 static void obj_cgroup_uncharge_pages(struct obj_cgroup *objcg,
2607 				      unsigned int nr_pages)
2608 {
2609 	struct mem_cgroup *memcg;
2610 
2611 	memcg = get_mem_cgroup_from_objcg(objcg);
2612 
2613 	mod_memcg_state(memcg, MEMCG_KMEM, -nr_pages);
2614 	memcg1_account_kmem(memcg, -nr_pages);
2615 	refill_stock(memcg, nr_pages);
2616 
2617 	css_put(&memcg->css);
2618 }
2619 
2620 /*
2621  * obj_cgroup_charge_pages: charge a number of kernel pages to an objcg
2622  * @objcg: object cgroup to charge
2623  * @gfp: reclaim mode
2624  * @nr_pages: number of pages to charge
2625  *
2626  * Returns 0 on success, an error code on failure.
2627  */
2628 static int obj_cgroup_charge_pages(struct obj_cgroup *objcg, gfp_t gfp,
2629 				   unsigned int nr_pages)
2630 {
2631 	struct mem_cgroup *memcg;
2632 	int ret;
2633 
2634 	memcg = get_mem_cgroup_from_objcg(objcg);
2635 
2636 	ret = try_charge_memcg(memcg, gfp, nr_pages);
2637 	if (ret)
2638 		goto out;
2639 
2640 	mod_memcg_state(memcg, MEMCG_KMEM, nr_pages);
2641 	memcg1_account_kmem(memcg, nr_pages);
2642 out:
2643 	css_put(&memcg->css);
2644 
2645 	return ret;
2646 }
2647 
2648 /**
2649  * __memcg_kmem_charge_page: charge a kmem page to the current memory cgroup
2650  * @page: page to charge
2651  * @gfp: reclaim mode
2652  * @order: allocation order
2653  *
2654  * Returns 0 on success, an error code on failure.
2655  */
2656 int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order)
2657 {
2658 	struct obj_cgroup *objcg;
2659 	int ret = 0;
2660 
2661 	objcg = current_obj_cgroup();
2662 	if (objcg) {
2663 		ret = obj_cgroup_charge_pages(objcg, gfp, 1 << order);
2664 		if (!ret) {
2665 			obj_cgroup_get(objcg);
2666 			page->memcg_data = (unsigned long)objcg |
2667 				MEMCG_DATA_KMEM;
2668 			return 0;
2669 		}
2670 	}
2671 	return ret;
2672 }
2673 
2674 /**
2675  * __memcg_kmem_uncharge_page: uncharge a kmem page
2676  * @page: page to uncharge
2677  * @order: allocation order
2678  */
2679 void __memcg_kmem_uncharge_page(struct page *page, int order)
2680 {
2681 	struct folio *folio = page_folio(page);
2682 	struct obj_cgroup *objcg;
2683 	unsigned int nr_pages = 1 << order;
2684 
2685 	if (!folio_memcg_kmem(folio))
2686 		return;
2687 
2688 	objcg = __folio_objcg(folio);
2689 	obj_cgroup_uncharge_pages(objcg, nr_pages);
2690 	folio->memcg_data = 0;
2691 	obj_cgroup_put(objcg);
2692 }
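
/*
 * Editor's sketch of how the pair above is typically used, loosely
 * modelled on the page allocator's accounting hooks. These helpers do not
 * exist in the tree and are for illustration only; real callers also gate
 * the charge on __GFP_ACCOUNT and memcg_kmem_online().
 */
#if 0
static struct page *alloc_accounted_pages(gfp_t gfp, int order)
{
	struct page *page = alloc_pages(gfp, order);

	if (page && __memcg_kmem_charge_page(page, gfp, order)) {
		__free_pages(page, order);	/* charge failed: back out */
		return NULL;
	}
	return page;
}

static void free_accounted_pages(struct page *page, int order)
{
	__memcg_kmem_uncharge_page(page, order);	/* no-op if not kmem */
	__free_pages(page, order);
}
#endif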
2693 
2694 static void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat,
2695 		     enum node_stat_item idx, int nr)
2696 {
2697 	struct memcg_stock_pcp *stock;
2698 	struct obj_cgroup *old = NULL;
2699 	unsigned long flags;
2700 	int *bytes;
2701 
2702 	local_lock_irqsave(&memcg_stock.stock_lock, flags);
2703 	stock = this_cpu_ptr(&memcg_stock);
2704 
2705 	/*
2706 	 * Save vmstat data in stock and skip vmstat array update unless
2707 	 * accumulating over a page of vmstat data or when pgdat or idx
2708 	 * changes.
2709 	 */
2710 	if (READ_ONCE(stock->cached_objcg) != objcg) {
2711 		old = drain_obj_stock(stock);
2712 		obj_cgroup_get(objcg);
2713 		stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes)
2714 				? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0;
2715 		WRITE_ONCE(stock->cached_objcg, objcg);
2716 		stock->cached_pgdat = pgdat;
2717 	} else if (stock->cached_pgdat != pgdat) {
2718 		/* Flush the existing cached vmstat data */
2719 		struct pglist_data *oldpg = stock->cached_pgdat;
2720 
2721 		if (stock->nr_slab_reclaimable_b) {
2722 			__mod_objcg_mlstate(objcg, oldpg, NR_SLAB_RECLAIMABLE_B,
2723 					  stock->nr_slab_reclaimable_b);
2724 			stock->nr_slab_reclaimable_b = 0;
2725 		}
2726 		if (stock->nr_slab_unreclaimable_b) {
2727 			__mod_objcg_mlstate(objcg, oldpg, NR_SLAB_UNRECLAIMABLE_B,
2728 					  stock->nr_slab_unreclaimable_b);
2729 			stock->nr_slab_unreclaimable_b = 0;
2730 		}
2731 		stock->cached_pgdat = pgdat;
2732 	}
2733 
2734 	bytes = (idx == NR_SLAB_RECLAIMABLE_B) ? &stock->nr_slab_reclaimable_b
2735 					       : &stock->nr_slab_unreclaimable_b;
2736 	/*
2737 	 * Even for large objects >= PAGE_SIZE, the vmstat data will still be
2738 	 * cached locally at least once before pushing it out.
2739 	 */
2740 	if (!*bytes) {
2741 		*bytes = nr;
2742 		nr = 0;
2743 	} else {
2744 		*bytes += nr;
2745 		if (abs(*bytes) > PAGE_SIZE) {
2746 			nr = *bytes;
2747 			*bytes = 0;
2748 		} else {
2749 			nr = 0;
2750 		}
2751 	}
2752 	if (nr)
2753 		__mod_objcg_mlstate(objcg, pgdat, idx, nr);
2754 
2755 	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
2756 	obj_cgroup_put(old);
2757 }
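
/*
 * Editor's illustration (not kernel code): the byte-batching rule above in
 * isolation. An update is cached locally at least once; once the
 * accumulated value crosses a page in either direction, the whole batch is
 * flushed to the shared counter.
 */
#if 0
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096

static int cached;	/* models stock->nr_slab_*_b */
static long counter;	/* models the shared node stat */

static void mod_state(int nr)
{
	if (!cached) {
		cached = nr;		/* always cache at least once */
		return;
	}
	cached += nr;
	if (abs(cached) > PAGE_SIZE) {
		counter += cached;	/* flush the accumulated batch */
		cached = 0;
	}
}

int main(void)
{
	mod_state(3000);
	mod_state(2000);	/* 5000 > PAGE_SIZE: flushed */
	mod_state(-100);	/* cached again */
	printf("cached=%d counter=%ld\n", cached, counter);
	return 0;
}
#endif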
2758 
2759 static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes)
2760 {
2761 	struct memcg_stock_pcp *stock;
2762 	unsigned long flags;
2763 	bool ret = false;
2764 
2765 	local_lock_irqsave(&memcg_stock.stock_lock, flags);
2766 
2767 	stock = this_cpu_ptr(&memcg_stock);
2768 	if (objcg == READ_ONCE(stock->cached_objcg) && stock->nr_bytes >= nr_bytes) {
2769 		stock->nr_bytes -= nr_bytes;
2770 		ret = true;
2771 	}
2772 
2773 	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
2774 
2775 	return ret;
2776 }
2777 
2778 static struct obj_cgroup *drain_obj_stock(struct memcg_stock_pcp *stock)
2779 {
2780 	struct obj_cgroup *old = READ_ONCE(stock->cached_objcg);
2781 
2782 	if (!old)
2783 		return NULL;
2784 
2785 	if (stock->nr_bytes) {
2786 		unsigned int nr_pages = stock->nr_bytes >> PAGE_SHIFT;
2787 		unsigned int nr_bytes = stock->nr_bytes & (PAGE_SIZE - 1);
2788 
2789 		if (nr_pages) {
2790 			struct mem_cgroup *memcg;
2791 
2792 			memcg = get_mem_cgroup_from_objcg(old);
2793 
2794 			mod_memcg_state(memcg, MEMCG_KMEM, -nr_pages);
2795 			memcg1_account_kmem(memcg, -nr_pages);
2796 			__refill_stock(memcg, nr_pages);
2797 
2798 			css_put(&memcg->css);
2799 		}
2800 
2801 		/*
2802 		 * The leftover is flushed to the centralized per-memcg value.
2803 		 * On the next attempt to refill obj stock it will be moved
2804 		 * to a per-cpu stock (probably on another CPU), see
2805 		 * refill_obj_stock().
2806 		 *
2807 		 * How often it's flushed is a trade-off between the memory
2808 		 * limit enforcement accuracy and potential CPU contention,
2809 		 * so it might be changed in the future.
2810 		 */
2811 		atomic_add(nr_bytes, &old->nr_charged_bytes);
2812 		stock->nr_bytes = 0;
2813 	}
2814 
2815 	/*
2816 	 * Flush the vmstat data in the current stock.
2817 	 */
2818 	if (stock->nr_slab_reclaimable_b || stock->nr_slab_unreclaimable_b) {
2819 		if (stock->nr_slab_reclaimable_b) {
2820 			__mod_objcg_mlstate(old, stock->cached_pgdat,
2821 					  NR_SLAB_RECLAIMABLE_B,
2822 					  stock->nr_slab_reclaimable_b);
2823 			stock->nr_slab_reclaimable_b = 0;
2824 		}
2825 		if (stock->nr_slab_unreclaimable_b) {
2826 			__mod_objcg_mlstate(old, stock->cached_pgdat,
2827 					  NR_SLAB_UNRECLAIMABLE_B,
2828 					  stock->nr_slab_unreclaimable_b);
2829 			stock->nr_slab_unreclaimable_b = 0;
2830 		}
2831 		stock->cached_pgdat = NULL;
2832 	}
2833 
2834 	WRITE_ONCE(stock->cached_objcg, NULL);
2835 	/*
2836 	 * The `old' objcg needs to be released by the caller via
2837 	 * obj_cgroup_put() outside of memcg_stock_pcp::stock_lock.
2838 	 */
2839 	return old;
2840 }
2841 
2842 static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
2843 				     struct mem_cgroup *root_memcg)
2844 {
2845 	struct obj_cgroup *objcg = READ_ONCE(stock->cached_objcg);
2846 	struct mem_cgroup *memcg;
2847 
2848 	if (objcg) {
2849 		memcg = obj_cgroup_memcg(objcg);
2850 		if (memcg && mem_cgroup_is_descendant(memcg, root_memcg))
2851 			return true;
2852 	}
2853 
2854 	return false;
2855 }
2856 
2857 static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes,
2858 			     bool allow_uncharge)
2859 {
2860 	struct memcg_stock_pcp *stock;
2861 	struct obj_cgroup *old = NULL;
2862 	unsigned long flags;
2863 	unsigned int nr_pages = 0;
2864 
2865 	local_lock_irqsave(&memcg_stock.stock_lock, flags);
2866 
2867 	stock = this_cpu_ptr(&memcg_stock);
2868 	if (READ_ONCE(stock->cached_objcg) != objcg) { /* reset if necessary */
2869 		old = drain_obj_stock(stock);
2870 		obj_cgroup_get(objcg);
2871 		WRITE_ONCE(stock->cached_objcg, objcg);
2872 		stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes)
2873 				? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0;
2874 		allow_uncharge = true;	/* Allow uncharge when objcg changes */
2875 	}
2876 	stock->nr_bytes += nr_bytes;
2877 
2878 	if (allow_uncharge && (stock->nr_bytes > PAGE_SIZE)) {
2879 		nr_pages = stock->nr_bytes >> PAGE_SHIFT;
2880 		stock->nr_bytes &= (PAGE_SIZE - 1);
2881 	}
2882 
2883 	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
2884 	obj_cgroup_put(old);
2885 
2886 	if (nr_pages)
2887 		obj_cgroup_uncharge_pages(objcg, nr_pages);
2888 }
2889 
2890 int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size)
2891 {
2892 	unsigned int nr_pages, nr_bytes;
2893 	int ret;
2894 
2895 	if (consume_obj_stock(objcg, size))
2896 		return 0;
2897 
2898 	/*
2899 	 * In theory, objcg->nr_charged_bytes can have enough
2900 	 * pre-charged bytes to satisfy the allocation. However,
2901 	 * flushing objcg->nr_charged_bytes requires two atomic
2902 	 * operations, and objcg->nr_charged_bytes can't be big.
2903 	 * The shared objcg->nr_charged_bytes can also become a
2904 	 * performance bottleneck if all tasks of the same memcg are
2905 	 * trying to update it. So it's better to ignore it and try
2906 	 * trying to update it. So it's better to ignore it and try to
2907 	 * grab some new pages. The stock's nr_bytes will be flushed to
2908 	 *
2909 	 * The stock's nr_bytes may contain enough pre-charged bytes
2910 	 * to allow charging one less page, but we can't rely
2911 	 * on the pre-charged bytes not being changed outside of
2912 	 * consume_obj_stock() or refill_obj_stock(). So ignore those
2913 	 * pre-charged bytes as well when charging pages. To avoid a
2914 	 * page uncharge right after a page charge, we set the
2915 	 * allow_uncharge flag to false when calling refill_obj_stock()
2916 	 * to temporarily allow the pre-charged bytes to exceed the page
2917 	 * size limit. The maximum reachable value of the pre-charged
2918 	 * bytes is (sizeof(object) + PAGE_SIZE - 2) if there is no data
2919 	 * race.
2920 	 */
2921 	nr_pages = size >> PAGE_SHIFT;
2922 	nr_bytes = size & (PAGE_SIZE - 1);
2923 
2924 	if (nr_bytes)
2925 		nr_pages += 1;
2926 
2927 	ret = obj_cgroup_charge_pages(objcg, gfp, nr_pages);
2928 	if (!ret && nr_bytes)
2929 		refill_obj_stock(objcg, PAGE_SIZE - nr_bytes, false);
2930 
2931 	return ret;
2932 }
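
/*
 * Editor's illustration (not kernel code): the page/byte split performed
 * above. A 700-byte charge (a hypothetical accounted object size) takes
 * one full page from the page counter and parks the 3396-byte remainder
 * in the per-cpu byte stock for later sub-page charges.
 */
#if 0
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

int main(void)
{
	size_t size = 700;
	size_t nr_pages = size >> PAGE_SHIFT;
	size_t nr_bytes = size & (PAGE_SIZE - 1);

	if (nr_bytes)
		nr_pages++;	/* round the page-level charge up */

	printf("charge %zu page(s), stock %zu byte(s)\n",
	       nr_pages, nr_bytes ? PAGE_SIZE - nr_bytes : 0);
	return 0;
}
#endif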
2933 
2934 void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size)
2935 {
2936 	refill_obj_stock(objcg, size, true);
2937 }
2938 
2939 static inline size_t obj_full_size(struct kmem_cache *s)
2940 {
2941 	/*
2942 	 * For each accounted object there is an extra space which is used
2943 	 * to store obj_cgroup membership. Charge it too.
2944 	 */
2945 	return s->size + sizeof(struct obj_cgroup *);
2946 }
2947 
2948 bool __memcg_slab_post_alloc_hook(struct kmem_cache *s, struct list_lru *lru,
2949 				  gfp_t flags, size_t size, void **p)
2950 {
2951 	struct obj_cgroup *objcg;
2952 	struct slab *slab;
2953 	unsigned long off;
2954 	size_t i;
2955 
2956 	/*
2957 	 * The obtained objcg pointer is safe to use within the current scope,
2958 	 * defined by the current task or a set_active_memcg() pair.
2959 	 * obj_cgroup_get() is used to get a permanent reference.
2960 	 */
2961 	objcg = current_obj_cgroup();
2962 	if (!objcg)
2963 		return true;
2964 
2965 	/*
2966 	 * slab_alloc_node() avoids the NULL check, so we might be called with a
2967 	 * single NULL object. kmem_cache_alloc_bulk() aborts if it can't fill
2968 	 * the whole requested size.
2969 	 * Return success as there's nothing to free back.
2970 	 */
2971 	if (unlikely(*p == NULL))
2972 		return true;
2973 
2974 	flags &= gfp_allowed_mask;
2975 
2976 	if (lru) {
2977 		int ret;
2978 		struct mem_cgroup *memcg;
2979 
2980 		memcg = get_mem_cgroup_from_objcg(objcg);
2981 		ret = memcg_list_lru_alloc(memcg, lru, flags);
2982 		css_put(&memcg->css);
2983 
2984 		if (ret)
2985 			return false;
2986 	}
2987 
2988 	if (obj_cgroup_charge(objcg, flags, size * obj_full_size(s)))
2989 		return false;
2990 
2991 	for (i = 0; i < size; i++) {
2992 		slab = virt_to_slab(p[i]);
2993 
2994 		if (!slab_obj_exts(slab) &&
2995 		    alloc_slab_obj_exts(slab, s, flags, false)) {
2996 			obj_cgroup_uncharge(objcg, obj_full_size(s));
2997 			continue;
2998 		}
2999 
3000 		off = obj_to_index(s, slab, p[i]);
3001 		obj_cgroup_get(objcg);
3002 		slab_obj_exts(slab)[off].objcg = objcg;
3003 		mod_objcg_state(objcg, slab_pgdat(slab),
3004 				cache_vmstat_idx(s), obj_full_size(s));
3005 	}
3006 
3007 	return true;
3008 }
3009 
3010 void __memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
3011 			    void **p, int objects, struct slabobj_ext *obj_exts)
3012 {
3013 	for (int i = 0; i < objects; i++) {
3014 		struct obj_cgroup *objcg;
3015 		unsigned int off;
3016 
3017 		off = obj_to_index(s, slab, p[i]);
3018 		objcg = obj_exts[off].objcg;
3019 		if (!objcg)
3020 			continue;
3021 
3022 		obj_exts[off].objcg = NULL;
3023 		obj_cgroup_uncharge(objcg, obj_full_size(s));
3024 		mod_objcg_state(objcg, slab_pgdat(slab), cache_vmstat_idx(s),
3025 				-obj_full_size(s));
3026 		obj_cgroup_put(objcg);
3027 	}
3028 }
3029 
3030 /*
3031  * Because folio_memcg(head) is not set on tails, set it now.
3032  */
3033 void split_page_memcg(struct page *head, int old_order, int new_order)
3034 {
3035 	struct folio *folio = page_folio(head);
3036 	int i;
3037 	unsigned int old_nr = 1 << old_order;
3038 	unsigned int new_nr = 1 << new_order;
3039 
3040 	if (mem_cgroup_disabled() || !folio_memcg_charged(folio))
3041 		return;
3042 
3043 	for (i = new_nr; i < old_nr; i += new_nr)
3044 		folio_page(folio, i)->memcg_data = folio->memcg_data;
3045 
3046 	if (folio_memcg_kmem(folio))
3047 		obj_cgroup_get_many(__folio_objcg(folio), old_nr / new_nr - 1);
3048 	else
3049 		css_get_many(&folio_memcg(folio)->css, old_nr / new_nr - 1);
3050 }
3051 
3052 unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
3053 {
3054 	unsigned long val;
3055 
3056 	if (mem_cgroup_is_root(memcg)) {
3057 		/*
3058 		 * Approximate root's usage from global state. This isn't
3059 		 * perfect, but the root usage was always an approximation.
3060 		 */
3061 		val = global_node_page_state(NR_FILE_PAGES) +
3062 			global_node_page_state(NR_ANON_MAPPED);
3063 		if (swap)
3064 			val += total_swap_pages - get_nr_swap_pages();
3065 	} else {
3066 		if (!swap)
3067 			val = page_counter_read(&memcg->memory);
3068 		else
3069 			val = page_counter_read(&memcg->memsw);
3070 	}
3071 	return val;
3072 }
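
/*
 * Editor's illustration (not kernel code): the same root-usage
 * approximation computed from userland. The /proc/vmstat field names
 * nr_file_pages and nr_anon_pages are assumed to correspond to
 * NR_FILE_PAGES and NR_ANON_MAPPED, as on recent kernels.
 */
#if 0
#include <stdio.h>
#include <string.h>

int main(void)
{
	char name[64];
	unsigned long val, file = 0, anon = 0;
	FILE *f = fopen("/proc/vmstat", "r");

	if (!f)
		return 1;
	while (fscanf(f, "%63s %lu", name, &val) == 2) {
		if (!strcmp(name, "nr_file_pages"))
			file = val;
		else if (!strcmp(name, "nr_anon_pages"))
			anon = val;
	}
	fclose(f);
	printf("approx root usage: %lu pages\n", file + anon);
	return 0;
}
#endif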
3073 
3074 static int memcg_online_kmem(struct mem_cgroup *memcg)
3075 {
3076 	struct obj_cgroup *objcg;
3077 
3078 	if (mem_cgroup_kmem_disabled())
3079 		return 0;
3080 
3081 	if (unlikely(mem_cgroup_is_root(memcg)))
3082 		return 0;
3083 
3084 	objcg = obj_cgroup_alloc();
3085 	if (!objcg)
3086 		return -ENOMEM;
3087 
3088 	objcg->memcg = memcg;
3089 	rcu_assign_pointer(memcg->objcg, objcg);
3090 	obj_cgroup_get(objcg);
3091 	memcg->orig_objcg = objcg;
3092 
3093 	static_branch_enable(&memcg_kmem_online_key);
3094 
3095 	memcg->kmemcg_id = memcg->id.id;
3096 
3097 	return 0;
3098 }
3099 
3100 static void memcg_offline_kmem(struct mem_cgroup *memcg)
3101 {
3102 	struct mem_cgroup *parent;
3103 
3104 	if (mem_cgroup_kmem_disabled())
3105 		return;
3106 
3107 	if (unlikely(mem_cgroup_is_root(memcg)))
3108 		return;
3109 
3110 	parent = parent_mem_cgroup(memcg);
3111 	if (!parent)
3112 		parent = root_mem_cgroup;
3113 
3114 	memcg_reparent_list_lrus(memcg, parent);
3115 
3116 	/*
3117 	 * The objcg's reparenting must happen after the list_lru's; make sure
3118 	 * list_lru helpers won't use the parent's list_lru until the child is drained.
3119 	 */
3120 	memcg_reparent_objcgs(memcg, parent);
3121 }
3122 
3123 #ifdef CONFIG_CGROUP_WRITEBACK
3124 
3125 #include <trace/events/writeback.h>
3126 
3127 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
3128 {
3129 	return wb_domain_init(&memcg->cgwb_domain, gfp);
3130 }
3131 
3132 static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
3133 {
3134 	wb_domain_exit(&memcg->cgwb_domain);
3135 }
3136 
3137 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
3138 {
3139 	wb_domain_size_changed(&memcg->cgwb_domain);
3140 }
3141 
3142 struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
3143 {
3144 	struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
3145 
3146 	if (!memcg->css.parent)
3147 		return NULL;
3148 
3149 	return &memcg->cgwb_domain;
3150 }
3151 
3152 /**
3153  * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg
3154  * @wb: bdi_writeback in question
3155  * @pfilepages: out parameter for number of file pages
3156  * @pheadroom: out parameter for number of allocatable pages according to memcg
3157  * @pdirty: out parameter for number of dirty pages
3158  * @pwriteback: out parameter for number of pages under writeback
3159  *
3160  * Determine the numbers of file, headroom, dirty, and writeback pages in
3161  * @wb's memcg.  File, dirty and writeback are self-explanatory.  Headroom
3162  * is a bit more involved.
3163  *
3164  * A memcg's headroom is "min(max, high) - used".  In the hierarchy, the
3165  * headroom is calculated as the lowest headroom of itself and the
3166  * ancestors.  Note that this doesn't consider the actual amount of
3167  * available memory in the system.  The caller should further cap
3168  * *@pheadroom accordingly.
3169  */
3170 void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
3171 			 unsigned long *pheadroom, unsigned long *pdirty,
3172 			 unsigned long *pwriteback)
3173 {
3174 	struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
3175 	struct mem_cgroup *parent;
3176 
3177 	mem_cgroup_flush_stats_ratelimited(memcg);
3178 
3179 	*pdirty = memcg_page_state(memcg, NR_FILE_DIRTY);
3180 	*pwriteback = memcg_page_state(memcg, NR_WRITEBACK);
3181 	*pfilepages = memcg_page_state(memcg, NR_INACTIVE_FILE) +
3182 			memcg_page_state(memcg, NR_ACTIVE_FILE);
3183 
3184 	*pheadroom = PAGE_COUNTER_MAX;
3185 	while ((parent = parent_mem_cgroup(memcg))) {
3186 		unsigned long ceiling = min(READ_ONCE(memcg->memory.max),
3187 					    READ_ONCE(memcg->memory.high));
3188 		unsigned long used = page_counter_read(&memcg->memory);
3189 
3190 		*pheadroom = min(*pheadroom, ceiling - min(ceiling, used));
3191 		memcg = parent;
3192 	}
3193 }
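
/*
 * Editor's illustration (not kernel code): the hierarchical headroom rule
 * above on a hypothetical two-level chain, in pages. Each level
 * contributes min(max, high) - used (clamped at zero) and the walk keeps
 * the minimum.
 */
#if 0
#include <stdio.h>

struct level { unsigned long max, high, used; };

int main(void)
{
	const struct level chain[] = {	/* leaf first, root-ward after */
		{ .max = 262144, .high = 200000, .used = 150000 },
		{ .max = 524288, .high = 524288, .used = 500000 },
	};
	unsigned long headroom = ~0UL;
	int i;

	for (i = 0; i < 2; i++) {
		unsigned long ceiling = chain[i].max < chain[i].high ?
					chain[i].max : chain[i].high;
		unsigned long used = chain[i].used < ceiling ?
					chain[i].used : ceiling;

		if (ceiling - used < headroom)
			headroom = ceiling - used;
	}
	printf("headroom = %lu pages\n", headroom);	/* 24288 here */
	return 0;
}
#endif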
3194 
3195 /*
3196  * Foreign dirty flushing
3197  *
3198  * There's an inherent mismatch between memcg and writeback.  The former
3199  * tracks ownership per-page while the latter per-inode.  This was a
3200  * deliberate design decision because honoring per-page ownership in the
3201  * writeback path is complicated, may lead to higher CPU and IO overheads,
3202  * and was deemed unnecessary given that write-sharing an inode across
3203  * different cgroups isn't a common use-case.
3204  *
3205  * Combined with inode majority-writer ownership switching, this works well
3206  * enough in most cases but there are some pathological cases.  For
3207  * example, let's say there are two cgroups A and B which keep writing to
3208  * different but confined parts of the same inode.  B owns the inode and
3209  * A's memory is limited far below B's.  A's dirty ratio can rise enough to
3210  * trigger balance_dirty_pages() sleeps but B's can be low enough to avoid
3211  * triggering background writeback.  A will be slowed down without a way to
3212  * make writeback of the dirty pages happen.
3213  *
3214  * Conditions like the above can lead to a cgroup getting repeatedly and
3215  * severely throttled after making some progress after each
3216  * dirty_expire_interval while the underlying IO device is almost
3217  * completely idle.
3218  *
3219  * Solving this problem completely requires matching the ownership tracking
3220  * granularities between memcg and writeback in either direction.  However,
3221  * the more egregious behaviors can be avoided by simply remembering the
3222  * most recent foreign dirtying events and initiating remote flushes on
3223  * them when local writeback isn't enough to keep the memory clean enough.
3224  *
3225  * The following two functions implement such mechanism.  When a foreign
3226  * page - a page whose memcg and writeback ownerships don't match - is
3227  * dirtied, mem_cgroup_track_foreign_dirty() records the inode owning
3228  * bdi_writeback on the page owning memcg.  When balance_dirty_pages()
3229  * decides that the memcg needs to sleep due to high dirty ratio, it calls
3230  * mem_cgroup_flush_foreign() which queues writeback on the recorded
3231  * foreign bdi_writebacks which haven't expired.  Both the numbers of
3232  * recorded bdi_writebacks and concurrent in-flight foreign writebacks are
3233  * limited to MEMCG_CGWB_FRN_CNT.
3234  *
3235  * The mechanism only remembers IDs and doesn't hold any object references.
3236  * As being wrong occasionally doesn't matter, updates and accesses to the
3237  * records are lockless and racy.
3238  */
3239 void mem_cgroup_track_foreign_dirty_slowpath(struct folio *folio,
3240 					     struct bdi_writeback *wb)
3241 {
3242 	struct mem_cgroup *memcg = folio_memcg(folio);
3243 	struct memcg_cgwb_frn *frn;
3244 	u64 now = get_jiffies_64();
3245 	u64 oldest_at = now;
3246 	int oldest = -1;
3247 	int i;
3248 
3249 	trace_track_foreign_dirty(folio, wb);
3250 
3251 	/*
3252 	 * Pick the slot to use.  If there is already a slot for @wb, keep
3253 	 * using it.  If not, replace the oldest one which isn't being
3254 	 * written out.
3255 	 */
3256 	for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) {
3257 		frn = &memcg->cgwb_frn[i];
3258 		if (frn->bdi_id == wb->bdi->id &&
3259 		    frn->memcg_id == wb->memcg_css->id)
3260 			break;
3261 		if (time_before64(frn->at, oldest_at) &&
3262 		    atomic_read(&frn->done.cnt) == 1) {
3263 			oldest = i;
3264 			oldest_at = frn->at;
3265 		}
3266 	}
3267 
3268 	if (i < MEMCG_CGWB_FRN_CNT) {
3269 		/*
3270 		 * Re-using an existing one.  Update timestamp lazily to
3271 		 * avoid making the cacheline hot.  We want them to be
3272 		 * reasonably up-to-date and significantly shorter than
3273 		 * dirty_expire_interval as that's what expires the record.
3274 		 * Use the shorter of 1s and dirty_expire_interval / 8.
3275 		 */
3276 		unsigned long update_intv =
3277 			min_t(unsigned long, HZ,
3278 			      msecs_to_jiffies(dirty_expire_interval * 10) / 8);
3279 
3280 		if (time_before64(frn->at, now - update_intv))
3281 			frn->at = now;
3282 	} else if (oldest >= 0) {
3283 		/* replace the oldest free one */
3284 		frn = &memcg->cgwb_frn[oldest];
3285 		frn->bdi_id = wb->bdi->id;
3286 		frn->memcg_id = wb->memcg_css->id;
3287 		frn->at = now;
3288 	}
3289 }
3290 
3291 /* issue foreign writeback flushes for recorded foreign dirtying events */
3292 void mem_cgroup_flush_foreign(struct bdi_writeback *wb)
3293 {
3294 	struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
3295 	unsigned long intv = msecs_to_jiffies(dirty_expire_interval * 10);
3296 	u64 now = jiffies_64;
3297 	int i;
3298 
3299 	for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) {
3300 		struct memcg_cgwb_frn *frn = &memcg->cgwb_frn[i];
3301 
3302 		/*
3303 		 * If the record is older than dirty_expire_interval,
3304 		 * writeback on it has already started.  No need to kick it
3305 		 * off again.  Also, don't start a new one if there's
3306 		 * already one in flight.
3307 		 */
3308 		if (time_after64(frn->at, now - intv) &&
3309 		    atomic_read(&frn->done.cnt) == 1) {
3310 			frn->at = 0;
3311 			trace_flush_foreign(wb, frn->bdi_id, frn->memcg_id);
3312 			cgroup_writeback_by_id(frn->bdi_id, frn->memcg_id,
3313 					       WB_REASON_FOREIGN_FLUSH,
3314 					       &frn->done);
3315 		}
3316 	}
3317 }
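
/*
 * Editor's illustration (not kernel code): the record-slot policy from
 * the block comment above, reduced to userland. A dirtying event reuses
 * the slot already matching (bdi_id, memcg_id); otherwise it replaces the
 * oldest slot with no writeback in flight.
 */
#if 0
#include <stdio.h>

#define FRN_CNT 4	/* stands in for MEMCG_CGWB_FRN_CNT */

struct frn { int bdi_id, memcg_id; unsigned long long at; int in_flight; };
static struct frn frns[FRN_CNT];

static void track(int bdi_id, int memcg_id, unsigned long long now)
{
	unsigned long long oldest_at = now;
	int i, oldest = -1;

	for (i = 0; i < FRN_CNT; i++) {
		if (frns[i].bdi_id == bdi_id && frns[i].memcg_id == memcg_id)
			break;
		if (frns[i].at < oldest_at && !frns[i].in_flight) {
			oldest = i;
			oldest_at = frns[i].at;
		}
	}
	if (i < FRN_CNT)
		frns[i].at = now;	/* refresh the existing record */
	else if (oldest >= 0)
		frns[oldest] = (struct frn){ bdi_id, memcg_id, now, 0 };
}

int main(void)
{
	track(1, 10, 100);
	track(2, 20, 200);
	track(1, 10, 300);	/* matches and refreshes slot 0 */
	printf("slot0: bdi=%d at=%llu\n", frns[0].bdi_id, frns[0].at);
	return 0;
}
#endif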
3318 
3319 #else	/* CONFIG_CGROUP_WRITEBACK */
3320 
3321 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
3322 {
3323 	return 0;
3324 }
3325 
3326 static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
3327 {
3328 }
3329 
3330 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
3331 {
3332 }
3333 
3334 #endif	/* CONFIG_CGROUP_WRITEBACK */
3335 
3336 /*
3337  * Private memory cgroup IDR
3338  *
3339  * Swap-out records and page cache shadow entries need to store memcg
3340  * references in constrained space, so we maintain an ID space that is
3341  * limited to 16 bits (MEM_CGROUP_ID_MAX), capping the total number of
3342  * memory-controlled cgroups at 64k.
3343  *
3344  * However, there usually are many references to the offline CSS after
3345  * the cgroup has been destroyed, such as page cache or reclaimable
3346  * slab objects, that don't need to hang on to the ID. We want to keep
3347  * those dead CSS from occupying IDs, or we might quickly exhaust the
3348  * relatively small ID space and prevent the creation of new cgroups
3349  * even when there are far fewer than 64k cgroups - possibly none.
3350  *
3351  * Maintain a private 16-bit ID space for memcg, and allow the ID to
3352  * be freed and recycled when it's no longer needed, which is usually
3353  * when the CSS is offlined.
3354  *
3355  * The only exception to that are records of swapped out tmpfs/shmem
3356  * pages that need to be attributed to live ancestors on swapin. But
3357  * those references are manageable from userspace.
3358  */
3359 
3360 #define MEM_CGROUP_ID_MAX	((1UL << MEM_CGROUP_ID_SHIFT) - 1)
3361 static DEFINE_XARRAY_ALLOC1(mem_cgroup_ids);
3362 
3363 static void mem_cgroup_id_remove(struct mem_cgroup *memcg)
3364 {
3365 	if (memcg->id.id > 0) {
3366 		xa_erase(&mem_cgroup_ids, memcg->id.id);
3367 		memcg->id.id = 0;
3368 	}
3369 }
3370 
3371 void __maybe_unused mem_cgroup_id_get_many(struct mem_cgroup *memcg,
3372 					   unsigned int n)
3373 {
3374 	refcount_add(n, &memcg->id.ref);
3375 }
3376 
3377 void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n)
3378 {
3379 	if (refcount_sub_and_test(n, &memcg->id.ref)) {
3380 		mem_cgroup_id_remove(memcg);
3381 
3382 		/* Memcg ID pins CSS */
3383 		css_put(&memcg->css);
3384 	}
3385 }
3386 
3387 static inline void mem_cgroup_id_put(struct mem_cgroup *memcg)
3388 {
3389 	mem_cgroup_id_put_many(memcg, 1);
3390 }
3391 
3392 /**
3393  * mem_cgroup_from_id - look up a memcg from a memcg id
3394  * @id: the memcg id to look up
3395  *
3396  * Caller must hold rcu_read_lock().
3397  */
3398 struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
3399 {
3400 	WARN_ON_ONCE(!rcu_read_lock_held());
3401 	return xa_load(&mem_cgroup_ids, id);
3402 }
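
/*
 * Caller-side sketch (illustrative, not from the original source):
 * resolving a previously recorded ID - e.g. from a swap record - back
 * to a memcg. The lookup itself is only valid under RCU; take a CSS
 * reference if the memcg is used beyond the critical section:
 *
 *	rcu_read_lock();
 *	memcg = mem_cgroup_from_id(id);
 *	if (memcg && !css_tryget_online(&memcg->css))
 *		memcg = NULL;
 *	rcu_read_unlock();
 */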
3403 
3404 #ifdef CONFIG_SHRINKER_DEBUG
3405 struct mem_cgroup *mem_cgroup_get_from_ino(unsigned long ino)
3406 {
3407 	struct cgroup *cgrp;
3408 	struct cgroup_subsys_state *css;
3409 	struct mem_cgroup *memcg;
3410 
3411 	cgrp = cgroup_get_from_id(ino);
3412 	if (IS_ERR(cgrp))
3413 		return ERR_CAST(cgrp);
3414 
3415 	css = cgroup_get_e_css(cgrp, &memory_cgrp_subsys);
3416 	if (css)
3417 		memcg = container_of(css, struct mem_cgroup, css);
3418 	else
3419 		memcg = ERR_PTR(-ENOENT);
3420 
3421 	cgroup_put(cgrp);
3422 
3423 	return memcg;
3424 }
3425 #endif
3426 
3427 static bool alloc_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
3428 {
3429 	struct mem_cgroup_per_node *pn;
3430 
3431 	pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, node);
3432 	if (!pn)
3433 		return false;
3434 
3435 	pn->lruvec_stats = kzalloc_node(sizeof(struct lruvec_stats),
3436 					GFP_KERNEL_ACCOUNT, node);
3437 	if (!pn->lruvec_stats)
3438 		goto fail;
3439 
3440 	pn->lruvec_stats_percpu = alloc_percpu_gfp(struct lruvec_stats_percpu,
3441 						   GFP_KERNEL_ACCOUNT);
3442 	if (!pn->lruvec_stats_percpu)
3443 		goto fail;
3444 
3445 	lruvec_init(&pn->lruvec);
3446 	pn->memcg = memcg;
3447 
3448 	memcg->nodeinfo[node] = pn;
3449 	return true;
3450 fail:
3451 	kfree(pn->lruvec_stats);
3452 	kfree(pn);
3453 	return false;
3454 }
3455 
3456 static void free_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
3457 {
3458 	struct mem_cgroup_per_node *pn = memcg->nodeinfo[node];
3459 
3460 	if (!pn)
3461 		return;
3462 
3463 	free_percpu(pn->lruvec_stats_percpu);
3464 	kfree(pn->lruvec_stats);
3465 	kfree(pn);
3466 }
3467 
3468 static void __mem_cgroup_free(struct mem_cgroup *memcg)
3469 {
3470 	int node;
3471 
3472 	obj_cgroup_put(memcg->orig_objcg);
3473 
3474 	for_each_node(node)
3475 		free_mem_cgroup_per_node_info(memcg, node);
3476 	memcg1_free_events(memcg);
3477 	kfree(memcg->vmstats);
3478 	free_percpu(memcg->vmstats_percpu);
3479 	kfree(memcg);
3480 }
3481 
3482 static void mem_cgroup_free(struct mem_cgroup *memcg)
3483 {
3484 	lru_gen_exit_memcg(memcg);
3485 	memcg_wb_domain_exit(memcg);
3486 	__mem_cgroup_free(memcg);
3487 }
3488 
3489 static struct mem_cgroup *mem_cgroup_alloc(struct mem_cgroup *parent)
3490 {
3491 	struct memcg_vmstats_percpu *statc, *pstatc;
3492 	struct mem_cgroup *memcg;
3493 	int node, cpu;
3494 	int __maybe_unused i;
3495 	long error;
3496 
3497 	memcg = kzalloc(struct_size(memcg, nodeinfo, nr_node_ids), GFP_KERNEL);
3498 	if (!memcg)
3499 		return ERR_PTR(-ENOMEM);
3500 
3501 	error = xa_alloc(&mem_cgroup_ids, &memcg->id.id, NULL,
3502 			 XA_LIMIT(1, MEM_CGROUP_ID_MAX), GFP_KERNEL);
3503 	if (error)
3504 		goto fail;
3505 	error = -ENOMEM;
3506 
3507 	memcg->vmstats = kzalloc(sizeof(struct memcg_vmstats),
3508 				 GFP_KERNEL_ACCOUNT);
3509 	if (!memcg->vmstats)
3510 		goto fail;
3511 
3512 	memcg->vmstats_percpu = alloc_percpu_gfp(struct memcg_vmstats_percpu,
3513 						 GFP_KERNEL_ACCOUNT);
3514 	if (!memcg->vmstats_percpu)
3515 		goto fail;
3516 
3517 	if (!memcg1_alloc_events(memcg))
3518 		goto fail;
3519 
3520 	for_each_possible_cpu(cpu) {
3521 		if (parent)
3522 			pstatc = per_cpu_ptr(parent->vmstats_percpu, cpu);
3523 		statc = per_cpu_ptr(memcg->vmstats_percpu, cpu);
3524 		statc->parent = parent ? pstatc : NULL;
3525 		statc->vmstats = memcg->vmstats;
3526 	}
3527 
3528 	for_each_node(node)
3529 		if (!alloc_mem_cgroup_per_node_info(memcg, node))
3530 			goto fail;
3531 
3532 	if (memcg_wb_domain_init(memcg, GFP_KERNEL))
3533 		goto fail;
3534 
3535 	INIT_WORK(&memcg->high_work, high_work_func);
3536 	vmpressure_init(&memcg->vmpressure);
3537 	INIT_LIST_HEAD(&memcg->memory_peaks);
3538 	INIT_LIST_HEAD(&memcg->swap_peaks);
3539 	spin_lock_init(&memcg->peaks_lock);
3540 	memcg->socket_pressure = jiffies;
3541 	memcg1_memcg_init(memcg);
3542 	memcg->kmemcg_id = -1;
3543 	INIT_LIST_HEAD(&memcg->objcg_list);
3544 #ifdef CONFIG_CGROUP_WRITEBACK
3545 	INIT_LIST_HEAD(&memcg->cgwb_list);
3546 	for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++)
3547 		memcg->cgwb_frn[i].done =
3548 			__WB_COMPLETION_INIT(&memcg_cgwb_frn_waitq);
3549 #endif
3550 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
3551 	spin_lock_init(&memcg->deferred_split_queue.split_queue_lock);
3552 	INIT_LIST_HEAD(&memcg->deferred_split_queue.split_queue);
3553 	memcg->deferred_split_queue.split_queue_len = 0;
3554 #endif
3555 	lru_gen_init_memcg(memcg);
3556 	return memcg;
3557 fail:
3558 	mem_cgroup_id_remove(memcg);
3559 	__mem_cgroup_free(memcg);
3560 	return ERR_PTR(error);
3561 }
3562 
3563 static struct cgroup_subsys_state * __ref
3564 mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
3565 {
3566 	struct mem_cgroup *parent = mem_cgroup_from_css(parent_css);
3567 	struct mem_cgroup *memcg, *old_memcg;
3568 
3569 	old_memcg = set_active_memcg(parent);
3570 	memcg = mem_cgroup_alloc(parent);
3571 	set_active_memcg(old_memcg);
3572 	if (IS_ERR(memcg))
3573 		return ERR_CAST(memcg);
3574 
3575 	page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX);
3576 	memcg1_soft_limit_reset(memcg);
3577 #ifdef CONFIG_ZSWAP
3578 	memcg->zswap_max = PAGE_COUNTER_MAX;
3579 	WRITE_ONCE(memcg->zswap_writeback, true);
3580 #endif
3581 	page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX);
3582 	if (parent) {
3583 		WRITE_ONCE(memcg->swappiness, mem_cgroup_swappiness(parent));
3584 
3585 		page_counter_init(&memcg->memory, &parent->memory, true);
3586 		page_counter_init(&memcg->swap, &parent->swap, false);
3587 #ifdef CONFIG_MEMCG_V1
3588 		WRITE_ONCE(memcg->oom_kill_disable, READ_ONCE(parent->oom_kill_disable));
3589 		page_counter_init(&memcg->kmem, &parent->kmem, false);
3590 		page_counter_init(&memcg->tcpmem, &parent->tcpmem, false);
3591 #endif
3592 	} else {
3593 		init_memcg_stats();
3594 		init_memcg_events();
3595 		page_counter_init(&memcg->memory, NULL, true);
3596 		page_counter_init(&memcg->swap, NULL, false);
3597 #ifdef CONFIG_MEMCG_V1
3598 		page_counter_init(&memcg->kmem, NULL, false);
3599 		page_counter_init(&memcg->tcpmem, NULL, false);
3600 #endif
3601 		root_mem_cgroup = memcg;
3602 		return &memcg->css;
3603 	}
3604 
3605 	if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
3606 		static_branch_inc(&memcg_sockets_enabled_key);
3607 
3608 	if (!cgroup_memory_nobpf)
3609 		static_branch_inc(&memcg_bpf_enabled_key);
3610 
3611 	return &memcg->css;
3612 }
3613 
3614 static int mem_cgroup_css_online(struct cgroup_subsys_state *css)
3615 {
3616 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3617 
3618 	if (memcg_online_kmem(memcg))
3619 		goto remove_id;
3620 
3621 	/*
3622 	 * A memcg must be visible for expand_shrinker_info()
3623 	 * by the time the maps are allocated. So, we allocate maps
3624 	 * here, when for_each_mem_cgroup() can't skip it.
3625 	 */
3626 	if (alloc_shrinker_info(memcg))
3627 		goto offline_kmem;
3628 
3629 	if (unlikely(mem_cgroup_is_root(memcg)) && !mem_cgroup_disabled())
3630 		queue_delayed_work(system_unbound_wq, &stats_flush_dwork,
3631 				   FLUSH_TIME);
3632 	lru_gen_online_memcg(memcg);
3633 
3634 	/* Online state pins memcg ID, memcg ID pins CSS */
3635 	refcount_set(&memcg->id.ref, 1);
3636 	css_get(css);
3637 
3638 	/*
3639 	 * Ensure mem_cgroup_from_id() works once we're fully online.
3640 	 *
3641 	 * We could do this earlier and require callers to filter with
3642 	 * css_tryget_online(). But right now there are no users that
3643 	 * need earlier access, and the workingset code relies on the
3644 	 * cgroup tree linkage (mem_cgroup_get_nr_swap_pages()). So
3645 	 * publish it here at the end of onlining. This matches the
3646 	 * regular ID destruction during offlining.
3647 	 */
3648 	xa_store(&mem_cgroup_ids, memcg->id.id, memcg, GFP_KERNEL);
3649 
3650 	return 0;
3651 offline_kmem:
3652 	memcg_offline_kmem(memcg);
3653 remove_id:
3654 	mem_cgroup_id_remove(memcg);
3655 	return -ENOMEM;
3656 }
3657 
3658 static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
3659 {
3660 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3661 
3662 	memcg1_css_offline(memcg);
3663 
3664 	page_counter_set_min(&memcg->memory, 0);
3665 	page_counter_set_low(&memcg->memory, 0);
3666 
3667 	zswap_memcg_offline_cleanup(memcg);
3668 
3669 	memcg_offline_kmem(memcg);
3670 	reparent_shrinker_deferred(memcg);
3671 	wb_memcg_offline(memcg);
3672 	lru_gen_offline_memcg(memcg);
3673 
3674 	drain_all_stock(memcg);
3675 
3676 	mem_cgroup_id_put(memcg);
3677 }
3678 
3679 static void mem_cgroup_css_released(struct cgroup_subsys_state *css)
3680 {
3681 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3682 
3683 	invalidate_reclaim_iterators(memcg);
3684 	lru_gen_release_memcg(memcg);
3685 }
3686 
3687 static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
3688 {
3689 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3690 	int __maybe_unused i;
3691 
3692 #ifdef CONFIG_CGROUP_WRITEBACK
3693 	for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++)
3694 		wb_wait_for_completion(&memcg->cgwb_frn[i].done);
3695 #endif
3696 	if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
3697 		static_branch_dec(&memcg_sockets_enabled_key);
3698 
3699 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg1_tcpmem_active(memcg))
3700 		static_branch_dec(&memcg_sockets_enabled_key);
3701 
3702 	if (!cgroup_memory_nobpf)
3703 		static_branch_dec(&memcg_bpf_enabled_key);
3704 
3705 	vmpressure_cleanup(&memcg->vmpressure);
3706 	cancel_work_sync(&memcg->high_work);
3707 	memcg1_remove_from_trees(memcg);
3708 	free_shrinker_info(memcg);
3709 	mem_cgroup_free(memcg);
3710 }
3711 
3712 /**
3713  * mem_cgroup_css_reset - reset the states of a mem_cgroup
3714  * @css: the target css
3715  *
3716  * Reset the states of the mem_cgroup associated with @css.  This is
3717  * invoked when the userland requests disabling on the default hierarchy
3718  * but the memcg is pinned through dependency.  The memcg should stop
3719  * applying policies and should revert to the vanilla state as it may be
3720  * made visible again.
3721  *
3722  * The current implementation only resets the essential configurations.
3723  * This needs to be expanded to cover all the visible parts.
3724  */
3725 static void mem_cgroup_css_reset(struct cgroup_subsys_state *css)
3726 {
3727 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3728 
3729 	page_counter_set_max(&memcg->memory, PAGE_COUNTER_MAX);
3730 	page_counter_set_max(&memcg->swap, PAGE_COUNTER_MAX);
3731 #ifdef CONFIG_MEMCG_V1
3732 	page_counter_set_max(&memcg->kmem, PAGE_COUNTER_MAX);
3733 	page_counter_set_max(&memcg->tcpmem, PAGE_COUNTER_MAX);
3734 #endif
3735 	page_counter_set_min(&memcg->memory, 0);
3736 	page_counter_set_low(&memcg->memory, 0);
3737 	page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX);
3738 	memcg1_soft_limit_reset(memcg);
3739 	page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX);
3740 	memcg_wb_domain_size_changed(memcg);
3741 }
3742 
3743 struct aggregate_control {
3744 	/* pointer to the aggregated (CPU and subtree aggregated) counters */
3745 	long *aggregate;
3746 	/* pointer to the non-hierarchical (CPU aggregated) counters */
3747 	long *local;
3748 	/* pointer to the pending child counters during tree propagation */
3749 	long *pending;
3750 	/* pointer to the parent's pending counters, could be NULL */
3751 	long *ppending;
3752 	/* pointer to the percpu counters to be aggregated */
3753 	long *cstat;
3754 	/* pointer to the percpu counters of the last aggregation */
3755 	long *cstat_prev;
3756 	/* size of the above counters */
3757 	int size;
3758 };
3759 
3760 static void mem_cgroup_stat_aggregate(struct aggregate_control *ac)
3761 {
3762 	int i;
3763 	long delta, delta_cpu, v;
3764 
3765 	for (i = 0; i < ac->size; i++) {
3766 		/*
3767 		 * Collect the aggregated propagation counts of groups
3768 		 * below us. We're in a per-cpu loop here and this is
3769 		 * a global counter, so the first cycle will get them.
3770 		 */
3771 		delta = ac->pending[i];
3772 		if (delta)
3773 			ac->pending[i] = 0;
3774 
3775 		/* Add CPU changes on this level since the last flush */
3776 		delta_cpu = 0;
3777 		v = READ_ONCE(ac->cstat[i]);
3778 		if (v != ac->cstat_prev[i]) {
3779 			delta_cpu = v - ac->cstat_prev[i];
3780 			delta += delta_cpu;
3781 			ac->cstat_prev[i] = v;
3782 		}
3783 
3784 		/* Aggregate counts on this level and propagate upwards */
3785 		if (delta_cpu)
3786 			ac->local[i] += delta_cpu;
3787 
3788 		if (delta) {
3789 			ac->aggregate[i] += delta;
3790 			if (ac->ppending)
3791 				ac->ppending[i] += delta;
3792 		}
3793 	}
3794 }
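
/*
 * Worked example with made-up numbers: for counter i, assume a child
 * left pending[i] = 5 and this CPU's counter moved from
 * cstat_prev[i] = 10 to cstat[i] = 13. Then delta_cpu = 3 and
 * delta = 5 + 3 = 8; local[i] (non-hierarchical) grows by 3,
 * aggregate[i] (hierarchical) grows by 8, and the 8 is deposited in
 * the parent's pending counter for the next level up.
 */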
3795 
3796 static void mem_cgroup_css_rstat_flush(struct cgroup_subsys_state *css, int cpu)
3797 {
3798 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3799 	struct mem_cgroup *parent = parent_mem_cgroup(memcg);
3800 	struct memcg_vmstats_percpu *statc;
3801 	struct aggregate_control ac;
3802 	int nid;
3803 
3804 	statc = per_cpu_ptr(memcg->vmstats_percpu, cpu);
3805 
3806 	ac = (struct aggregate_control) {
3807 		.aggregate = memcg->vmstats->state,
3808 		.local = memcg->vmstats->state_local,
3809 		.pending = memcg->vmstats->state_pending,
3810 		.ppending = parent ? parent->vmstats->state_pending : NULL,
3811 		.cstat = statc->state,
3812 		.cstat_prev = statc->state_prev,
3813 		.size = MEMCG_VMSTAT_SIZE,
3814 	};
3815 	mem_cgroup_stat_aggregate(&ac);
3816 
3817 	ac = (struct aggregate_control) {
3818 		.aggregate = memcg->vmstats->events,
3819 		.local = memcg->vmstats->events_local,
3820 		.pending = memcg->vmstats->events_pending,
3821 		.ppending = parent ? parent->vmstats->events_pending : NULL,
3822 		.cstat = statc->events,
3823 		.cstat_prev = statc->events_prev,
3824 		.size = NR_MEMCG_EVENTS,
3825 	};
3826 	mem_cgroup_stat_aggregate(&ac);
3827 
3828 	for_each_node_state(nid, N_MEMORY) {
3829 		struct mem_cgroup_per_node *pn = memcg->nodeinfo[nid];
3830 		struct lruvec_stats *lstats = pn->lruvec_stats;
3831 		struct lruvec_stats *plstats = NULL;
3832 		struct lruvec_stats_percpu *lstatc;
3833 
3834 		if (parent)
3835 			plstats = parent->nodeinfo[nid]->lruvec_stats;
3836 
3837 		lstatc = per_cpu_ptr(pn->lruvec_stats_percpu, cpu);
3838 
3839 		ac = (struct aggregate_control) {
3840 			.aggregate = lstats->state,
3841 			.local = lstats->state_local,
3842 			.pending = lstats->state_pending,
3843 			.ppending = plstats ? plstats->state_pending : NULL,
3844 			.cstat = lstatc->state,
3845 			.cstat_prev = lstatc->state_prev,
3846 			.size = NR_MEMCG_NODE_STAT_ITEMS,
3847 		};
3848 		mem_cgroup_stat_aggregate(&ac);
3849 
3850 	}
3851 	WRITE_ONCE(statc->stats_updates, 0);
3852 	/* We are in a per-cpu loop here, only do the atomic write once */
3853 	if (atomic64_read(&memcg->vmstats->stats_updates))
3854 		atomic64_set(&memcg->vmstats->stats_updates, 0);
3855 }
3856 
3857 static void mem_cgroup_fork(struct task_struct *task)
3858 {
3859 	/*
3860 	 * Set the update flag to cause task->objcg to be initialized lazily
3861 	 * on the first allocation. It can be done without any synchronization
3862 	 * because it's always performed on the current task, so does
3863 	 * because it's always performed on the current task, as is
3864 	 */
3865 	task->objcg = (struct obj_cgroup *)CURRENT_OBJCG_UPDATE_FLAG;
3866 }
3867 
3868 static void mem_cgroup_exit(struct task_struct *task)
3869 {
3870 	struct obj_cgroup *objcg = task->objcg;
3871 
3872 	objcg = (struct obj_cgroup *)
3873 		((unsigned long)objcg & ~CURRENT_OBJCG_UPDATE_FLAG);
3874 	obj_cgroup_put(objcg);
3875 
3876 	/*
3877 	 * Some kernel allocations can happen after this point,
3878 	 * but let's ignore them. It can be done without any synchronization
3879 	 * because it's always performed on the current task, as is
3880 	 * current_objcg_update().
3881 	 */
3882 	task->objcg = NULL;
3883 }
3884 
3885 #ifdef CONFIG_LRU_GEN
3886 static void mem_cgroup_lru_gen_attach(struct cgroup_taskset *tset)
3887 {
3888 	struct task_struct *task;
3889 	struct cgroup_subsys_state *css;
3890 
3891 	/* find the first leader if there is any */
3892 	cgroup_taskset_for_each_leader(task, css, tset)
3893 		break;
3894 
3895 	if (!task)
3896 		return;
3897 
3898 	task_lock(task);
3899 	if (task->mm && READ_ONCE(task->mm->owner) == task)
3900 		lru_gen_migrate_mm(task->mm);
3901 	task_unlock(task);
3902 }
3903 #else
3904 static void mem_cgroup_lru_gen_attach(struct cgroup_taskset *tset) {}
3905 #endif /* CONFIG_LRU_GEN */
3906 
3907 static void mem_cgroup_kmem_attach(struct cgroup_taskset *tset)
3908 {
3909 	struct task_struct *task;
3910 	struct cgroup_subsys_state *css;
3911 
3912 	cgroup_taskset_for_each(task, css, tset) {
3913 		/* atomically set the update bit */
3914 		set_bit(CURRENT_OBJCG_UPDATE_BIT, (unsigned long *)&task->objcg);
3915 	}
3916 }
3917 
3918 static void mem_cgroup_attach(struct cgroup_taskset *tset)
3919 {
3920 	mem_cgroup_lru_gen_attach(tset);
3921 	mem_cgroup_kmem_attach(tset);
3922 }
3923 
3924 static int seq_puts_memcg_tunable(struct seq_file *m, unsigned long value)
3925 {
3926 	if (value == PAGE_COUNTER_MAX)
3927 		seq_puts(m, "max\n");
3928 	else
3929 		seq_printf(m, "%llu\n", (u64)value * PAGE_SIZE);
3930 
3931 	return 0;
3932 }
3933 
3934 static u64 memory_current_read(struct cgroup_subsys_state *css,
3935 			       struct cftype *cft)
3936 {
3937 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3938 
3939 	return (u64)page_counter_read(&memcg->memory) * PAGE_SIZE;
3940 }
3941 
3942 #define OFP_PEAK_UNSET (-1UL)
3943 
3944 static int peak_show(struct seq_file *sf, void *v, struct page_counter *pc)
3945 {
3946 	struct cgroup_of_peak *ofp = of_peak(sf->private);
3947 	u64 fd_peak = READ_ONCE(ofp->value), peak;
3948 
3949 	/* User wants global or local peak? */
3950 	if (fd_peak == OFP_PEAK_UNSET)
3951 		peak = pc->watermark;
3952 	else
3953 		peak = max(fd_peak, READ_ONCE(pc->local_watermark));
3954 
3955 	seq_printf(sf, "%llu\n", peak * PAGE_SIZE);
3956 	return 0;
3957 }
3958 
3959 static int memory_peak_show(struct seq_file *sf, void *v)
3960 {
3961 	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(sf));
3962 
3963 	return peak_show(sf, v, &memcg->memory);
3964 }
3965 
3966 static int peak_open(struct kernfs_open_file *of)
3967 {
3968 	struct cgroup_of_peak *ofp = of_peak(of);
3969 
3970 	ofp->value = OFP_PEAK_UNSET;
3971 	return 0;
3972 }
3973 
3974 static void peak_release(struct kernfs_open_file *of)
3975 {
3976 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
3977 	struct cgroup_of_peak *ofp = of_peak(of);
3978 
3979 	if (ofp->value == OFP_PEAK_UNSET) {
3980 		/* fast path (no writes on this fd) */
3981 		return;
3982 	}
3983 	spin_lock(&memcg->peaks_lock);
3984 	list_del(&ofp->list);
3985 	spin_unlock(&memcg->peaks_lock);
3986 }
3987 
3988 static ssize_t peak_write(struct kernfs_open_file *of, char *buf, size_t nbytes,
3989 			  loff_t off, struct page_counter *pc,
3990 			  struct list_head *watchers)
3991 {
3992 	unsigned long usage;
3993 	struct cgroup_of_peak *peer_ctx;
3994 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
3995 	struct cgroup_of_peak *ofp = of_peak(of);
3996 
3997 	spin_lock(&memcg->peaks_lock);
3998 
3999 	usage = page_counter_read(pc);
4000 	WRITE_ONCE(pc->local_watermark, usage);
4001 
4002 	list_for_each_entry(peer_ctx, watchers, list)
4003 		if (usage > peer_ctx->value)
4004 			WRITE_ONCE(peer_ctx->value, usage);
4005 
4006 	/* initial write, register watcher */
4007 	if (ofp->value == OFP_PEAK_UNSET)
4008 		list_add(&ofp->list, watchers);
4009 
4010 	WRITE_ONCE(ofp->value, usage);
4011 	spin_unlock(&memcg->peaks_lock);
4012 
4013 	return nbytes;
4014 }
4015 
4016 static ssize_t memory_peak_write(struct kernfs_open_file *of, char *buf,
4017 				 size_t nbytes, loff_t off)
4018 {
4019 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4020 
4021 	return peak_write(of, buf, nbytes, off, &memcg->memory,
4022 			  &memcg->memory_peaks);
4023 }
4024 
4025 #undef OFP_PEAK_UNSET
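
/*
 * Userspace sketch (illustrative; error handling omitted): every open
 * file description of memory.peak can track its own local peak.
 * Writing any string rearms the fd-local watermark to the current
 * usage; reads on that fd then report the maximum usage seen since
 * the write, while fds that never wrote keep seeing the global peak:
 *
 *	int fd = open("memory.peak", O_RDWR);
 *	write(fd, "reset\n", 6);		/* arm fd-local tracking */
 *	/* ... workload runs ... */
 *	pread(fd, buf, sizeof(buf), 0);		/* peak since the write */
 */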
4026 
4027 static int memory_min_show(struct seq_file *m, void *v)
4028 {
4029 	return seq_puts_memcg_tunable(m,
4030 		READ_ONCE(mem_cgroup_from_seq(m)->memory.min));
4031 }
4032 
4033 static ssize_t memory_min_write(struct kernfs_open_file *of,
4034 				char *buf, size_t nbytes, loff_t off)
4035 {
4036 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4037 	unsigned long min;
4038 	int err;
4039 
4040 	buf = strstrip(buf);
4041 	err = page_counter_memparse(buf, "max", &min);
4042 	if (err)
4043 		return err;
4044 
4045 	page_counter_set_min(&memcg->memory, min);
4046 
4047 	return nbytes;
4048 }
4049 
4050 static int memory_low_show(struct seq_file *m, void *v)
4051 {
4052 	return seq_puts_memcg_tunable(m,
4053 		READ_ONCE(mem_cgroup_from_seq(m)->memory.low));
4054 }
4055 
4056 static ssize_t memory_low_write(struct kernfs_open_file *of,
4057 				char *buf, size_t nbytes, loff_t off)
4058 {
4059 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4060 	unsigned long low;
4061 	int err;
4062 
4063 	buf = strstrip(buf);
4064 	err = page_counter_memparse(buf, "max", &low);
4065 	if (err)
4066 		return err;
4067 
4068 	page_counter_set_low(&memcg->memory, low);
4069 
4070 	return nbytes;
4071 }
4072 
4073 static int memory_high_show(struct seq_file *m, void *v)
4074 {
4075 	return seq_puts_memcg_tunable(m,
4076 		READ_ONCE(mem_cgroup_from_seq(m)->memory.high));
4077 }
4078 
4079 static ssize_t memory_high_write(struct kernfs_open_file *of,
4080 				 char *buf, size_t nbytes, loff_t off)
4081 {
4082 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4083 	unsigned int nr_retries = MAX_RECLAIM_RETRIES;
4084 	bool drained = false;
4085 	unsigned long high;
4086 	int err;
4087 
4088 	buf = strstrip(buf);
4089 	err = page_counter_memparse(buf, "max", &high);
4090 	if (err)
4091 		return err;
4092 
4093 	page_counter_set_high(&memcg->memory, high);
4094 
4095 	for (;;) {
4096 		unsigned long nr_pages = page_counter_read(&memcg->memory);
4097 		unsigned long reclaimed;
4098 
4099 		if (nr_pages <= high)
4100 			break;
4101 
4102 		if (signal_pending(current))
4103 			break;
4104 
4105 		if (!drained) {
4106 			drain_all_stock(memcg);
4107 			drained = true;
4108 			continue;
4109 		}
4110 
4111 		reclaimed = try_to_free_mem_cgroup_pages(memcg, nr_pages - high,
4112 					GFP_KERNEL, MEMCG_RECLAIM_MAY_SWAP, NULL);
4113 
4114 		if (!reclaimed && !nr_retries--)
4115 			break;
4116 	}
4117 
4118 	memcg_wb_domain_size_changed(memcg);
4119 	return nbytes;
4120 }
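
/*
 * Interface sketch (illustrative): memory.high is the throttle limit.
 * Unlike memory.max, exceeding it never triggers the OOM killer; the
 * write above reclaims the excess, and offending tasks are throttled
 * on the allocation path:
 *
 *	$ echo 1G > memory.high
 *	$ echo max > memory.high	# disable the throttle limit
 */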
4121 
4122 static int memory_max_show(struct seq_file *m, void *v)
4123 {
4124 	return seq_puts_memcg_tunable(m,
4125 		READ_ONCE(mem_cgroup_from_seq(m)->memory.max));
4126 }
4127 
4128 static ssize_t memory_max_write(struct kernfs_open_file *of,
4129 				char *buf, size_t nbytes, loff_t off)
4130 {
4131 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4132 	unsigned int nr_reclaims = MAX_RECLAIM_RETRIES;
4133 	bool drained = false;
4134 	unsigned long max;
4135 	int err;
4136 
4137 	buf = strstrip(buf);
4138 	err = page_counter_memparse(buf, "max", &max);
4139 	if (err)
4140 		return err;
4141 
4142 	xchg(&memcg->memory.max, max);
4143 
4144 	for (;;) {
4145 		unsigned long nr_pages = page_counter_read(&memcg->memory);
4146 
4147 		if (nr_pages <= max)
4148 			break;
4149 
4150 		if (signal_pending(current))
4151 			break;
4152 
4153 		if (!drained) {
4154 			drain_all_stock(memcg);
4155 			drained = true;
4156 			continue;
4157 		}
4158 
4159 		if (nr_reclaims) {
4160 			if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max,
4161 					GFP_KERNEL, MEMCG_RECLAIM_MAY_SWAP, NULL))
4162 				nr_reclaims--;
4163 			continue;
4164 		}
4165 
4166 		memcg_memory_event(memcg, MEMCG_OOM);
4167 		if (!mem_cgroup_out_of_memory(memcg, GFP_KERNEL, 0))
4168 			break;
4169 		cond_resched();
4170 	}
4171 
4172 	memcg_wb_domain_size_changed(memcg);
4173 	return nbytes;
4174 }
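
/*
 * Interface sketch (illustrative): memory.max takes a byte count with
 * an optional k/m/g suffix (parsed by page_counter_memparse) or the
 * literal "max". If reclaim cannot push usage below the new limit,
 * the loop above falls back to the memcg OOM killer:
 *
 *	$ echo 512M > memory.max	# hard limit of 512 MiB
 *	$ echo max > memory.max		# remove the limit
 */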
4175 
4176 /*
4177  * Note: don't forget to update the 'samples/cgroup/memcg_event_listener'
4178  * if any new events become available.
4179  */
4180 static void __memory_events_show(struct seq_file *m, atomic_long_t *events)
4181 {
4182 	seq_printf(m, "low %lu\n", atomic_long_read(&events[MEMCG_LOW]));
4183 	seq_printf(m, "high %lu\n", atomic_long_read(&events[MEMCG_HIGH]));
4184 	seq_printf(m, "max %lu\n", atomic_long_read(&events[MEMCG_MAX]));
4185 	seq_printf(m, "oom %lu\n", atomic_long_read(&events[MEMCG_OOM]));
4186 	seq_printf(m, "oom_kill %lu\n",
4187 		   atomic_long_read(&events[MEMCG_OOM_KILL]));
4188 	seq_printf(m, "oom_group_kill %lu\n",
4189 		   atomic_long_read(&events[MEMCG_OOM_GROUP_KILL]));
4190 }
4191 
4192 static int memory_events_show(struct seq_file *m, void *v)
4193 {
4194 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
4195 
4196 	__memory_events_show(m, memcg->memory_events);
4197 	return 0;
4198 }
4199 
4200 static int memory_events_local_show(struct seq_file *m, void *v)
4201 {
4202 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
4203 
4204 	__memory_events_show(m, memcg->memory_events_local);
4205 	return 0;
4206 }
4207 
4208 int memory_stat_show(struct seq_file *m, void *v)
4209 {
4210 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
4211 	char *buf = kmalloc(SEQ_BUF_SIZE, GFP_KERNEL);
4212 	struct seq_buf s;
4213 
4214 	if (!buf)
4215 		return -ENOMEM;
4216 	seq_buf_init(&s, buf, SEQ_BUF_SIZE);
4217 	memory_stat_format(memcg, &s);
4218 	seq_puts(m, buf);
4219 	kfree(buf);
4220 	return 0;
4221 }
4222 
4223 #ifdef CONFIG_NUMA
4224 static inline unsigned long lruvec_page_state_output(struct lruvec *lruvec,
4225 						     int item)
4226 {
4227 	return lruvec_page_state(lruvec, item) *
4228 		memcg_page_state_output_unit(item);
4229 }
4230 
4231 static int memory_numa_stat_show(struct seq_file *m, void *v)
4232 {
4233 	int i;
4234 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
4235 
4236 	mem_cgroup_flush_stats(memcg);
4237 
4238 	for (i = 0; i < ARRAY_SIZE(memory_stats); i++) {
4239 		int nid;
4240 
4241 		if (memory_stats[i].idx >= NR_VM_NODE_STAT_ITEMS)
4242 			continue;
4243 
4244 		seq_printf(m, "%s", memory_stats[i].name);
4245 		for_each_node_state(nid, N_MEMORY) {
4246 			u64 size;
4247 			struct lruvec *lruvec;
4248 
4249 			lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
4250 			size = lruvec_page_state_output(lruvec,
4251 							memory_stats[i].idx);
4252 			seq_printf(m, " N%d=%llu", nid, size);
4253 		}
4254 		seq_putc(m, '\n');
4255 	}
4256 
4257 	return 0;
4258 }
4259 #endif
4260 
4261 static int memory_oom_group_show(struct seq_file *m, void *v)
4262 {
4263 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
4264 
4265 	seq_printf(m, "%d\n", READ_ONCE(memcg->oom_group));
4266 
4267 	return 0;
4268 }
4269 
4270 static ssize_t memory_oom_group_write(struct kernfs_open_file *of,
4271 				      char *buf, size_t nbytes, loff_t off)
4272 {
4273 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4274 	int ret, oom_group;
4275 
4276 	buf = strstrip(buf);
4277 	if (!buf)
4278 		return -EINVAL;
4279 
4280 	ret = kstrtoint(buf, 0, &oom_group);
4281 	if (ret)
4282 		return ret;
4283 
4284 	if (oom_group != 0 && oom_group != 1)
4285 		return -EINVAL;
4286 
4287 	WRITE_ONCE(memcg->oom_group, oom_group);
4288 
4289 	return nbytes;
4290 }
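
/*
 * Usage sketch (illustrative): setting memory.oom.group to 1 makes
 * the OOM killer treat the cgroup as a single indivisible workload,
 * killing all of its tasks together instead of picking one victim:
 *
 *	$ echo 1 > memory.oom.group
 */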
4291 
4292 enum {
4293 	MEMORY_RECLAIM_SWAPPINESS = 0,
4294 	MEMORY_RECLAIM_NULL,
4295 };
4296 
4297 static const match_table_t tokens = {
4298 	{ MEMORY_RECLAIM_SWAPPINESS, "swappiness=%d"},
4299 	{ MEMORY_RECLAIM_NULL, NULL },
4300 };
4301 
4302 static ssize_t memory_reclaim(struct kernfs_open_file *of, char *buf,
4303 			      size_t nbytes, loff_t off)
4304 {
4305 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4306 	unsigned int nr_retries = MAX_RECLAIM_RETRIES;
4307 	unsigned long nr_to_reclaim, nr_reclaimed = 0;
4308 	int swappiness = -1;
4309 	unsigned int reclaim_options;
4310 	char *old_buf, *start;
4311 	substring_t args[MAX_OPT_ARGS];
4312 
4313 	buf = strstrip(buf);
4314 
4315 	old_buf = buf;
4316 	nr_to_reclaim = memparse(buf, &buf) / PAGE_SIZE;
4317 	if (buf == old_buf)
4318 		return -EINVAL;
4319 
4320 	buf = strstrip(buf);
4321 
4322 	while ((start = strsep(&buf, " ")) != NULL) {
4323 		if (!strlen(start))
4324 			continue;
4325 		switch (match_token(start, tokens, args)) {
4326 		case MEMORY_RECLAIM_SWAPPINESS:
4327 			if (match_int(&args[0], &swappiness))
4328 				return -EINVAL;
4329 			if (swappiness < MIN_SWAPPINESS || swappiness > MAX_SWAPPINESS)
4330 				return -EINVAL;
4331 			break;
4332 		default:
4333 			return -EINVAL;
4334 		}
4335 	}
4336 
4337 	reclaim_options	= MEMCG_RECLAIM_MAY_SWAP | MEMCG_RECLAIM_PROACTIVE;
4338 	while (nr_reclaimed < nr_to_reclaim) {
4339 		/* Will converge on zero, but reclaim enforces a minimum */
4340 		unsigned long batch_size = (nr_to_reclaim - nr_reclaimed) / 4;
4341 		unsigned long reclaimed;
4342 
4343 		if (signal_pending(current))
4344 			return -EINTR;
4345 
4346 		/*
4347 		 * This is the final attempt, drain percpu lru caches in the
4348 		 * hope of introducing more evictable pages for
4349 		 * try_to_free_mem_cgroup_pages().
4350 		 */
4351 		if (!nr_retries)
4352 			lru_add_drain_all();
4353 
4354 		reclaimed = try_to_free_mem_cgroup_pages(memcg,
4355 					batch_size, GFP_KERNEL,
4356 					reclaim_options,
4357 					swappiness == -1 ? NULL : &swappiness);
4358 
4359 		if (!reclaimed && !nr_retries--)
4360 			return -EAGAIN;
4361 
4362 		nr_reclaimed += reclaimed;
4363 	}
4364 
4365 	return nbytes;
4366 }
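
/*
 * Interface sketch (illustrative): memory.reclaim takes the amount of
 * proactive reclaim to attempt, optionally followed by the
 * swappiness= key parsed above:
 *
 *	$ echo "1G" > memory.reclaim
 *	$ echo "512M swappiness=0" > memory.reclaim	# avoid swap
 *
 * The write fails with -EAGAIN if the requested amount cannot be
 * reclaimed within MAX_RECLAIM_RETRIES unproductive attempts.
 */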
4367 
4368 static struct cftype memory_files[] = {
4369 	{
4370 		.name = "current",
4371 		.flags = CFTYPE_NOT_ON_ROOT,
4372 		.read_u64 = memory_current_read,
4373 	},
4374 	{
4375 		.name = "peak",
4376 		.flags = CFTYPE_NOT_ON_ROOT,
4377 		.open = peak_open,
4378 		.release = peak_release,
4379 		.seq_show = memory_peak_show,
4380 		.write = memory_peak_write,
4381 	},
4382 	{
4383 		.name = "min",
4384 		.flags = CFTYPE_NOT_ON_ROOT,
4385 		.seq_show = memory_min_show,
4386 		.write = memory_min_write,
4387 	},
4388 	{
4389 		.name = "low",
4390 		.flags = CFTYPE_NOT_ON_ROOT,
4391 		.seq_show = memory_low_show,
4392 		.write = memory_low_write,
4393 	},
4394 	{
4395 		.name = "high",
4396 		.flags = CFTYPE_NOT_ON_ROOT,
4397 		.seq_show = memory_high_show,
4398 		.write = memory_high_write,
4399 	},
4400 	{
4401 		.name = "max",
4402 		.flags = CFTYPE_NOT_ON_ROOT,
4403 		.seq_show = memory_max_show,
4404 		.write = memory_max_write,
4405 	},
4406 	{
4407 		.name = "events",
4408 		.flags = CFTYPE_NOT_ON_ROOT,
4409 		.file_offset = offsetof(struct mem_cgroup, events_file),
4410 		.seq_show = memory_events_show,
4411 	},
4412 	{
4413 		.name = "events.local",
4414 		.flags = CFTYPE_NOT_ON_ROOT,
4415 		.file_offset = offsetof(struct mem_cgroup, events_local_file),
4416 		.seq_show = memory_events_local_show,
4417 	},
4418 	{
4419 		.name = "stat",
4420 		.seq_show = memory_stat_show,
4421 	},
4422 #ifdef CONFIG_NUMA
4423 	{
4424 		.name = "numa_stat",
4425 		.seq_show = memory_numa_stat_show,
4426 	},
4427 #endif
4428 	{
4429 		.name = "oom.group",
4430 		.flags = CFTYPE_NOT_ON_ROOT | CFTYPE_NS_DELEGATABLE,
4431 		.seq_show = memory_oom_group_show,
4432 		.write = memory_oom_group_write,
4433 	},
4434 	{
4435 		.name = "reclaim",
4436 		.flags = CFTYPE_NS_DELEGATABLE,
4437 		.write = memory_reclaim,
4438 	},
4439 	{ }	/* terminate */
4440 };
4441 
4442 struct cgroup_subsys memory_cgrp_subsys = {
4443 	.css_alloc = mem_cgroup_css_alloc,
4444 	.css_online = mem_cgroup_css_online,
4445 	.css_offline = mem_cgroup_css_offline,
4446 	.css_released = mem_cgroup_css_released,
4447 	.css_free = mem_cgroup_css_free,
4448 	.css_reset = mem_cgroup_css_reset,
4449 	.css_rstat_flush = mem_cgroup_css_rstat_flush,
4450 	.attach = mem_cgroup_attach,
4451 	.fork = mem_cgroup_fork,
4452 	.exit = mem_cgroup_exit,
4453 	.dfl_cftypes = memory_files,
4454 #ifdef CONFIG_MEMCG_V1
4455 	.legacy_cftypes = mem_cgroup_legacy_files,
4456 #endif
4457 	.early_init = 0,
4458 };
4459 
4460 /**
4461  * mem_cgroup_calculate_protection - check if memory consumption is in the normal range
4462  * @root: the top ancestor of the sub-tree being checked
4463  * @memcg: the memory cgroup to check
4464  *
4465  * WARNING: This function is not stateless! It can only be used as part
4466  *          of a top-down tree iteration, not for isolated queries.
4467  */
4468 void mem_cgroup_calculate_protection(struct mem_cgroup *root,
4469 				     struct mem_cgroup *memcg)
4470 {
4471 	bool recursive_protection =
4472 		cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_RECURSIVE_PROT;
4473 
4474 	if (mem_cgroup_disabled())
4475 		return;
4476 
4477 	if (!root)
4478 		root = root_mem_cgroup;
4479 
4480 	page_counter_calculate_protection(&root->memory, &memcg->memory, recursive_protection);
4481 }
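
/*
 * A minimal sketch of the intended top-down use (illustrative): the
 * caller walks the subtree with mem_cgroup_iter() and computes each
 * memcg's protection before consulting it, the way reclaim does:
 *
 *	memcg = mem_cgroup_iter(root, NULL, NULL);
 *	do {
 *		mem_cgroup_calculate_protection(root, memcg);
 *		if (mem_cgroup_below_min(root, memcg))
 *			continue;	/* hard protection, skip */
 *		/* ... scan this memcg's lruvecs ... */
 *	} while ((memcg = mem_cgroup_iter(root, memcg, NULL)));
 */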
4482 
4483 static int charge_memcg(struct folio *folio, struct mem_cgroup *memcg,
4484 			gfp_t gfp)
4485 {
4486 	int ret;
4487 
4488 	ret = try_charge(memcg, gfp, folio_nr_pages(folio));
4489 	if (ret)
4490 		goto out;
4491 
4492 	css_get(&memcg->css);
4493 	commit_charge(folio, memcg);
4494 	memcg1_commit_charge(folio, memcg);
4495 out:
4496 	return ret;
4497 }
4498 
4499 int __mem_cgroup_charge(struct folio *folio, struct mm_struct *mm, gfp_t gfp)
4500 {
4501 	struct mem_cgroup *memcg;
4502 	int ret;
4503 
4504 	memcg = get_mem_cgroup_from_mm(mm);
4505 	ret = charge_memcg(folio, memcg, gfp);
4506 	css_put(&memcg->css);
4507 
4508 	return ret;
4509 }
4510 
4511 /**
4512  * mem_cgroup_charge_hugetlb - charge the memcg for a hugetlb folio
4513  * @folio: folio being charged
4514  * @gfp: reclaim mode
4515  *
4516  * This function is called when allocating a huge page folio, after the page has
4517  * already been obtained and charged to the appropriate hugetlb cgroup
4518  * controller (if it is enabled).
4519  *
4520  * Returns -ENOMEM if the memcg is already at its limit.
4521  * Returns 0 if either the charge was successful, or if we skip the charging.
4522  */
4523 int mem_cgroup_charge_hugetlb(struct folio *folio, gfp_t gfp)
4524 {
4525 	struct mem_cgroup *memcg = get_mem_cgroup_from_current();
4526 	int ret = 0;
4527 
4528 	/*
4529 	 * Even if memcg does not account for hugetlb, we still want to update
4530 	 * system-level stats via lruvec_stat_mod_folio. Return 0, and skip
4531 	 * charging the memcg.
4532 	 */
4533 	if (mem_cgroup_disabled() || !memcg_accounts_hugetlb() ||
4534 		!memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
4535 		goto out;
4536 
4537 	if (charge_memcg(folio, memcg, gfp))
4538 		ret = -ENOMEM;
4539 
4540 out:
4541 	mem_cgroup_put(memcg);
4542 	return ret;
4543 }
4544 
4545 /**
4546  * mem_cgroup_swapin_charge_folio - Charge a newly allocated folio for swapin.
4547  * @folio: folio to charge.
4548  * @mm: mm context of the victim
4549  * @gfp: reclaim mode
4550  * @entry: swap entry for which the folio is allocated
4551  *
4552  * This function charges a folio allocated for swapin. Please call this before
4553  * adding the folio to the swapcache.
4554  *
4555  * Returns 0 on success. Otherwise, an error code is returned.
4556  */
4557 int mem_cgroup_swapin_charge_folio(struct folio *folio, struct mm_struct *mm,
4558 				  gfp_t gfp, swp_entry_t entry)
4559 {
4560 	struct mem_cgroup *memcg;
4561 	unsigned short id;
4562 	int ret;
4563 
4564 	if (mem_cgroup_disabled())
4565 		return 0;
4566 
4567 	id = lookup_swap_cgroup_id(entry);
4568 	rcu_read_lock();
4569 	memcg = mem_cgroup_from_id(id);
4570 	if (!memcg || !css_tryget_online(&memcg->css))
4571 		memcg = get_mem_cgroup_from_mm(mm);
4572 	rcu_read_unlock();
4573 
4574 	ret = charge_memcg(folio, memcg, gfp);
4575 
4576 	css_put(&memcg->css);
4577 	return ret;
4578 }
4579 
4580 /*
4581  * mem_cgroup_swapin_uncharge_swap - uncharge swap slot
4582  * @entry: the first swap entry for which the pages are charged
4583  * @nr_pages: number of pages which will be uncharged
4584  *
4585  * Call this function after successfully adding the charged page to swapcache.
4586  *
4587  * Note: This function assumes the page whose swap slot is being uncharged
4588  * is an order-0 page.
4589  */
4590 void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry, unsigned int nr_pages)
4591 {
4592 	/*
4593 	 * Cgroup1's unified memory+swap counter has been charged with the
4594 	 * new swapcache page, finish the transfer by uncharging the swap
4595 	 * slot. The swap slot would also get uncharged when it dies, but
4596 	 * it can stick around indefinitely and we'd count the page twice
4597 	 * the entire time.
4598 	 *
4599 	 * Cgroup2 has separate resource counters for memory and swap,
4600 	 * so this is a non-issue here. Memory and swap charge lifetimes
4601 	 * correspond 1:1 to page and swap slot lifetimes: we charge the
4602 	 * page to memory here, and uncharge swap when the slot is freed.
4603 	 */
4604 	if (do_memsw_account()) {
4605 		/*
4606 		 * The swap entry might not get freed for a long time,
4607 		 * let's not wait for it.  The page already received a
4608 		 * memory+swap charge, drop the swap entry duplicate.
4609 		 */
4610 		mem_cgroup_uncharge_swap(entry, nr_pages);
4611 	}
4612 }
4613 
4614 struct uncharge_gather {
4615 	struct mem_cgroup *memcg;
4616 	unsigned long nr_memory;
4617 	unsigned long pgpgout;
4618 	unsigned long nr_kmem;
4619 	int nid;
4620 };
4621 
4622 static inline void uncharge_gather_clear(struct uncharge_gather *ug)
4623 {
4624 	memset(ug, 0, sizeof(*ug));
4625 }
4626 
4627 static void uncharge_batch(const struct uncharge_gather *ug)
4628 {
4629 	if (ug->nr_memory) {
4630 		page_counter_uncharge(&ug->memcg->memory, ug->nr_memory);
4631 		if (do_memsw_account())
4632 			page_counter_uncharge(&ug->memcg->memsw, ug->nr_memory);
4633 		if (ug->nr_kmem) {
4634 			mod_memcg_state(ug->memcg, MEMCG_KMEM, -ug->nr_kmem);
4635 			memcg1_account_kmem(ug->memcg, -ug->nr_kmem);
4636 		}
4637 		memcg1_oom_recover(ug->memcg);
4638 	}
4639 
4640 	memcg1_uncharge_batch(ug->memcg, ug->pgpgout, ug->nr_memory, ug->nid);
4641 
4642 	/* drop reference from uncharge_folio */
4643 	css_put(&ug->memcg->css);
4644 }
4645 
4646 static void uncharge_folio(struct folio *folio, struct uncharge_gather *ug)
4647 {
4648 	long nr_pages;
4649 	struct mem_cgroup *memcg;
4650 	struct obj_cgroup *objcg;
4651 
4652 	VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
4653 
4654 	/*
4655 	 * Nobody should be changing or seriously looking at
4656 	 * folio memcg or objcg at this point; we have fully
4657 	 * exclusive access to the folio.
4658 	 */
4659 	if (folio_memcg_kmem(folio)) {
4660 		objcg = __folio_objcg(folio);
4661 		/*
4662 		 * This get matches the put at the end of the function and
4663 		 * kmem pages do not hold memcg references anymore.
4664 		 */
4665 		memcg = get_mem_cgroup_from_objcg(objcg);
4666 	} else {
4667 		memcg = __folio_memcg(folio);
4668 	}
4669 
4670 	if (!memcg)
4671 		return;
4672 
4673 	if (ug->memcg != memcg) {
4674 		if (ug->memcg) {
4675 			uncharge_batch(ug);
4676 			uncharge_gather_clear(ug);
4677 		}
4678 		ug->memcg = memcg;
4679 		ug->nid = folio_nid(folio);
4680 
4681 		/* pairs with css_put in uncharge_batch */
4682 		css_get(&memcg->css);
4683 	}
4684 
4685 	nr_pages = folio_nr_pages(folio);
4686 
4687 	if (folio_memcg_kmem(folio)) {
4688 		ug->nr_memory += nr_pages;
4689 		ug->nr_kmem += nr_pages;
4690 
4691 		folio->memcg_data = 0;
4692 		obj_cgroup_put(objcg);
4693 	} else {
4694 		/* LRU pages aren't accounted at the root level */
4695 		if (!mem_cgroup_is_root(memcg))
4696 			ug->nr_memory += nr_pages;
4697 		ug->pgpgout++;
4698 
4699 		WARN_ON_ONCE(folio_unqueue_deferred_split(folio));
4700 		folio->memcg_data = 0;
4701 	}
4702 
4703 	css_put(&memcg->css);
4704 }
4705 
4706 void __mem_cgroup_uncharge(struct folio *folio)
4707 {
4708 	struct uncharge_gather ug;
4709 
4710 	/* Don't touch folio->lru of any random page, pre-check: */
4711 	if (!folio_memcg_charged(folio))
4712 		return;
4713 
4714 	uncharge_gather_clear(&ug);
4715 	uncharge_folio(folio, &ug);
4716 	uncharge_batch(&ug);
4717 }
4718 
4719 void __mem_cgroup_uncharge_folios(struct folio_batch *folios)
4720 {
4721 	struct uncharge_gather ug;
4722 	unsigned int i;
4723 
4724 	uncharge_gather_clear(&ug);
4725 	for (i = 0; i < folios->nr; i++)
4726 		uncharge_folio(folios->folios[i], &ug);
4727 	if (ug.memcg)
4728 		uncharge_batch(&ug);
4729 }
4730 
4731 /**
4732  * mem_cgroup_replace_folio - Charge a folio's replacement.
4733  * @old: Currently circulating folio.
4734  * @new: Replacement folio.
4735  *
4736  * Charge @new as a replacement folio for @old. @old will
4737  * be uncharged upon free.
4738  *
4739  * Both folios must be locked, @new->mapping must be set up.
4740  */
4741 void mem_cgroup_replace_folio(struct folio *old, struct folio *new)
4742 {
4743 	struct mem_cgroup *memcg;
4744 	long nr_pages = folio_nr_pages(new);
4745 
4746 	VM_BUG_ON_FOLIO(!folio_test_locked(old), old);
4747 	VM_BUG_ON_FOLIO(!folio_test_locked(new), new);
4748 	VM_BUG_ON_FOLIO(folio_test_anon(old) != folio_test_anon(new), new);
4749 	VM_BUG_ON_FOLIO(folio_nr_pages(old) != nr_pages, new);
4750 
4751 	if (mem_cgroup_disabled())
4752 		return;
4753 
4754 	/* Page cache replacement: new folio already charged? */
4755 	if (folio_memcg_charged(new))
4756 		return;
4757 
4758 	memcg = folio_memcg(old);
4759 	VM_WARN_ON_ONCE_FOLIO(!memcg, old);
4760 	if (!memcg)
4761 		return;
4762 
4763 	/* Force-charge the new page. The old one will be freed soon */
4764 	if (!mem_cgroup_is_root(memcg)) {
4765 		page_counter_charge(&memcg->memory, nr_pages);
4766 		if (do_memsw_account())
4767 			page_counter_charge(&memcg->memsw, nr_pages);
4768 	}
4769 
4770 	css_get(&memcg->css);
4771 	commit_charge(new, memcg);
4772 	memcg1_commit_charge(new, memcg);
4773 }
4774 
4775 /**
4776  * mem_cgroup_migrate - Transfer the memcg data from the old to the new folio.
4777  * @old: Currently circulating folio.
4778  * @new: Replacement folio.
4779  *
4780  * Transfer the memcg data from the old folio to the new folio for migration.
4781  * The old folio's data info will be cleared. Note that the memory counters
4782  * will remain unchanged throughout the process.
4783  *
4784  * Both folios must be locked, @new->mapping must be set up.
4785  */
4786 void mem_cgroup_migrate(struct folio *old, struct folio *new)
4787 {
4788 	struct mem_cgroup *memcg;
4789 
4790 	VM_BUG_ON_FOLIO(!folio_test_locked(old), old);
4791 	VM_BUG_ON_FOLIO(!folio_test_locked(new), new);
4792 	VM_BUG_ON_FOLIO(folio_test_anon(old) != folio_test_anon(new), new);
4793 	VM_BUG_ON_FOLIO(folio_nr_pages(old) != folio_nr_pages(new), new);
4794 	VM_BUG_ON_FOLIO(folio_test_lru(old), old);
4795 
4796 	if (mem_cgroup_disabled())
4797 		return;
4798 
4799 	memcg = folio_memcg(old);
4800 	/*
4801 	 * Note that it is normal to see !memcg for a hugetlb folio.
4802 	 * For example, it could have been allocated when memory_hugetlb_accounting
4803 	 * was not selected.
4804 	 */
4805 	VM_WARN_ON_ONCE_FOLIO(!folio_test_hugetlb(old) && !memcg, old);
4806 	if (!memcg)
4807 		return;
4808 
4809 	/* Transfer the charge and the css ref */
4810 	commit_charge(new, memcg);
4811 
4812 	/* Warning should never happen, so don't worry about refcount non-0 */
4813 	WARN_ON_ONCE(folio_unqueue_deferred_split(old));
4814 	old->memcg_data = 0;
4815 }
4816 
4817 DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
4818 EXPORT_SYMBOL(memcg_sockets_enabled_key);
4819 
4820 void mem_cgroup_sk_alloc(struct sock *sk)
4821 {
4822 	struct mem_cgroup *memcg;
4823 
4824 	if (!mem_cgroup_sockets_enabled)
4825 		return;
4826 
4827 	/* Do not associate the sock with unrelated interrupted task's memcg. */
4828 	if (!in_task())
4829 		return;
4830 
4831 	rcu_read_lock();
4832 	memcg = mem_cgroup_from_task(current);
4833 	if (mem_cgroup_is_root(memcg))
4834 		goto out;
4835 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg1_tcpmem_active(memcg))
4836 		goto out;
4837 	if (css_tryget(&memcg->css))
4838 		sk->sk_memcg = memcg;
4839 out:
4840 	rcu_read_unlock();
4841 }
4842 
4843 void mem_cgroup_sk_free(struct sock *sk)
4844 {
4845 	if (sk->sk_memcg)
4846 		css_put(&sk->sk_memcg->css);
4847 }
4848 
4849 /**
4850  * mem_cgroup_charge_skmem - charge socket memory
4851  * @memcg: memcg to charge
4852  * @nr_pages: number of pages to charge
4853  * @gfp_mask: reclaim mode
4854  *
4855  * Charges @nr_pages to @memcg. Returns %true if the charge fit within
4856  * @memcg's configured limit, %false if it doesn't.
4857  */
4858 bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages,
4859 			     gfp_t gfp_mask)
4860 {
4861 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
4862 		return memcg1_charge_skmem(memcg, nr_pages, gfp_mask);
4863 
4864 	if (try_charge(memcg, gfp_mask, nr_pages) == 0) {
4865 		mod_memcg_state(memcg, MEMCG_SOCK, nr_pages);
4866 		return true;
4867 	}
4868 
4869 	return false;
4870 }
4871 
4872 /**
4873  * mem_cgroup_uncharge_skmem - uncharge socket memory
4874  * @memcg: memcg to uncharge
4875  * @nr_pages: number of pages to uncharge
4876  */
4877 void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
4878 {
4879 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
4880 		memcg1_uncharge_skmem(memcg, nr_pages);
4881 		return;
4882 	}
4883 
4884 	mod_memcg_state(memcg, MEMCG_SOCK, -nr_pages);
4885 
4886 	refill_stock(memcg, nr_pages);
4887 }
4888 
4889 static int __init cgroup_memory(char *s)
4890 {
4891 	char *token;
4892 
4893 	while ((token = strsep(&s, ",")) != NULL) {
4894 		if (!*token)
4895 			continue;
4896 		if (!strcmp(token, "nosocket"))
4897 			cgroup_memory_nosocket = true;
4898 		if (!strcmp(token, "nokmem"))
4899 			cgroup_memory_nokmem = true;
4900 		if (!strcmp(token, "nobpf"))
4901 			cgroup_memory_nobpf = true;
4902 	}
4903 	return 1;
4904 }
4905 __setup("cgroup.memory=", cgroup_memory);
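
/*
 * Boot-time sketch (illustrative): the flags accepted above combine
 * on the kernel command line, e.g.:
 *
 *	cgroup.memory=nosocket,nokmem
 *
 * disables socket and kernel memory accounting while leaving BPF
 * memory accounting enabled.
 */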
4906 
4907 /*
4908  * subsys_initcall() for memory controller.
4909  *
4910  * Some parts like memcg_hotplug_cpu_dead() have to be initialized from this
4911  * context because of lock dependencies (cgroup_lock -> cpu hotplug) but
4912  * basically everything that doesn't depend on a specific mem_cgroup structure
4913  * should be initialized from here.
4914  */
4915 static int __init mem_cgroup_init(void)
4916 {
4917 	int cpu;
4918 
4919 	/*
4920 	 * Currently s32 type (can refer to struct batched_lruvec_stat) is
4921 	 * used for per-memcg-per-cpu caching of per-node statistics. In order
4922 	 * to work fine, we should make sure that the overfill threshold can't
4923 	 * exceed S32_MAX / PAGE_SIZE.
4924 	 */
4925 	BUILD_BUG_ON(MEMCG_CHARGE_BATCH > S32_MAX / PAGE_SIZE);
4926 
4927 	cpuhp_setup_state_nocalls(CPUHP_MM_MEMCQ_DEAD, "mm/memctrl:dead", NULL,
4928 				  memcg_hotplug_cpu_dead);
4929 
4930 	for_each_possible_cpu(cpu)
4931 		INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work,
4932 			  drain_local_stock);
4933 
4934 	return 0;
4935 }
4936 subsys_initcall(mem_cgroup_init);
4937 
4938 #ifdef CONFIG_SWAP
4939 static struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg)
4940 {
4941 	while (!refcount_inc_not_zero(&memcg->id.ref)) {
4942 		/*
4943 		 * The root cgroup cannot be destroyed, so its refcount must
4944 		 * always be >= 1.
4945 		 */
4946 		if (WARN_ON_ONCE(mem_cgroup_is_root(memcg))) {
4947 			VM_BUG_ON(1);
4948 			break;
4949 		}
4950 		memcg = parent_mem_cgroup(memcg);
4951 		if (!memcg)
4952 			memcg = root_mem_cgroup;
4953 	}
4954 	return memcg;
4955 }
4956 
4957 /**
4958  * mem_cgroup_swapout - transfer a memsw charge to swap
4959  * @folio: folio whose memsw charge to transfer
4960  * @entry: swap entry to move the charge to
4961  *
4962  * Transfer the memsw charge of @folio to @entry.
4963  */
4964 void mem_cgroup_swapout(struct folio *folio, swp_entry_t entry)
4965 {
4966 	struct mem_cgroup *memcg, *swap_memcg;
4967 	unsigned int nr_entries;
4968 
4969 	VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
4970 	VM_BUG_ON_FOLIO(folio_ref_count(folio), folio);
4971 
4972 	if (mem_cgroup_disabled())
4973 		return;
4974 
4975 	if (!do_memsw_account())
4976 		return;
4977 
4978 	memcg = folio_memcg(folio);
4979 
4980 	VM_WARN_ON_ONCE_FOLIO(!memcg, folio);
4981 	if (!memcg)
4982 		return;
4983 
4984 	/*
4985 	 * In case the memcg owning these pages has been offlined and doesn't
4986 	 * have an ID allocated to it anymore, charge the closest online
4987 	 * ancestor for the swap instead and transfer the memory+swap charge.
4988 	 */
4989 	swap_memcg = mem_cgroup_id_get_online(memcg);
4990 	nr_entries = folio_nr_pages(folio);
4991 	/* Get references for the tail pages, too */
4992 	if (nr_entries > 1)
4993 		mem_cgroup_id_get_many(swap_memcg, nr_entries - 1);
4994 	mod_memcg_state(swap_memcg, MEMCG_SWAP, nr_entries);
4995 
4996 	swap_cgroup_record(folio, entry);
4997 
4998 	folio_unqueue_deferred_split(folio);
4999 	folio->memcg_data = 0;
5000 
5001 	if (!mem_cgroup_is_root(memcg))
5002 		page_counter_uncharge(&memcg->memory, nr_entries);
5003 
5004 	if (memcg != swap_memcg) {
5005 		if (!mem_cgroup_is_root(swap_memcg))
5006 			page_counter_charge(&swap_memcg->memsw, nr_entries);
5007 		page_counter_uncharge(&memcg->memsw, nr_entries);
5008 	}
5009 
5010 	memcg1_swapout(folio, memcg);
5011 	css_put(&memcg->css);
5012 }
5013 
5014 /**
5015  * __mem_cgroup_try_charge_swap - try charging swap space for a folio
5016  * @folio: folio being added to swap
5017  * @entry: swap entry to charge
5018  *
5019  * Try to charge @folio's memcg for the swap space at @entry.
5020  *
5021  * Returns 0 on success, -ENOMEM on failure.
5022  */
5023 int __mem_cgroup_try_charge_swap(struct folio *folio, swp_entry_t entry)
5024 {
5025 	unsigned int nr_pages = folio_nr_pages(folio);
5026 	struct page_counter *counter;
5027 	struct mem_cgroup *memcg;
5028 
5029 	if (do_memsw_account())
5030 		return 0;
5031 
5032 	memcg = folio_memcg(folio);
5033 
5034 	VM_WARN_ON_ONCE_FOLIO(!memcg, folio);
5035 	if (!memcg)
5036 		return 0;
5037 
5038 	if (!entry.val) {
5039 		memcg_memory_event(memcg, MEMCG_SWAP_FAIL);
5040 		return 0;
5041 	}
5042 
5043 	memcg = mem_cgroup_id_get_online(memcg);
5044 
5045 	if (!mem_cgroup_is_root(memcg) &&
5046 	    !page_counter_try_charge(&memcg->swap, nr_pages, &counter)) {
5047 		memcg_memory_event(memcg, MEMCG_SWAP_MAX);
5048 		memcg_memory_event(memcg, MEMCG_SWAP_FAIL);
5049 		mem_cgroup_id_put(memcg);
5050 		return -ENOMEM;
5051 	}
5052 
5053 	/* Get references for the tail pages, too */
5054 	if (nr_pages > 1)
5055 		mem_cgroup_id_get_many(memcg, nr_pages - 1);
5056 	mod_memcg_state(memcg, MEMCG_SWAP, nr_pages);
5057 
5058 	swap_cgroup_record(folio, entry);
5059 
5060 	return 0;
5061 }
5062 
5063 /**
5064  * __mem_cgroup_uncharge_swap - uncharge swap space
5065  * @entry: swap entry to uncharge
5066  * @nr_pages: the amount of swap space to uncharge
5067  */
5068 void __mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages)
5069 {
5070 	struct mem_cgroup *memcg;
5071 	unsigned short id;
5072 
5073 	id = swap_cgroup_clear(entry, nr_pages);
5074 	rcu_read_lock();
5075 	memcg = mem_cgroup_from_id(id);
5076 	if (memcg) {
5077 		if (!mem_cgroup_is_root(memcg)) {
5078 			if (do_memsw_account())
5079 				page_counter_uncharge(&memcg->memsw, nr_pages);
5080 			else
5081 				page_counter_uncharge(&memcg->swap, nr_pages);
5082 		}
5083 		mod_memcg_state(memcg, MEMCG_SWAP, -nr_pages);
5084 		mem_cgroup_id_put_many(memcg, nr_pages);
5085 	}
5086 	rcu_read_unlock();
5087 }
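
/*
 * Lifecycle sketch (illustrative): every page of swap holds both a
 * counter charge and a memcg ID reference, so an entry's life looks like
 *
 *	__mem_cgroup_try_charge_swap(folio, entry);	// charge + ID refs
 *	...						// entry in use
 *	__mem_cgroup_uncharge_swap(entry, nr_pages);	// undo both
 *
 * The ID references taken at charge time are what keep the
 * mem_cgroup_from_id() lookup above meaningful even after the cgroup
 * has been offlined.
 */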

long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
{
	long nr_swap_pages = get_nr_swap_pages();

	if (mem_cgroup_disabled() || do_memsw_account())
		return nr_swap_pages;
	for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg))
		nr_swap_pages = min_t(long, nr_swap_pages,
				      READ_ONCE(memcg->swap.max) -
				      page_counter_read(&memcg->swap));
	return nr_swap_pages;
}
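
/*
 * Worked example (hypothetical numbers): with 1000 free swap pages
 * globally, a child at swap.max=100/usage=40 and its parent at
 * swap.max=200/usage=180, the walk above yields
 *
 *	min(1000, 100 - 40, 200 - 180) = 20
 *
 * i.e. the tightest limit anywhere up the hierarchy caps the result.
 */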

bool mem_cgroup_swap_full(struct folio *folio)
{
	struct mem_cgroup *memcg;

	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);

	if (vm_swap_full())
		return true;
	if (do_memsw_account())
		return false;

	memcg = folio_memcg(folio);
	if (!memcg)
		return false;

	for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg)) {
		unsigned long usage = page_counter_read(&memcg->swap);

		if (usage * 2 >= READ_ONCE(memcg->swap.high) ||
		    usage * 2 >= READ_ONCE(memcg->swap.max))
			return true;
	}

	return false;
}
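
/*
 * Illustrative numbers: with swap.max=100 pages and usage=50, the check
 * above (50 * 2 >= 100) already reports "full", i.e. a cgroup is treated
 * as swap-full at half of its configured limit, mirroring the global
 * half-capacity heuristic in vm_swap_full().
 */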

static int __init setup_swap_account(char *s)
{
	bool res;

	if (!kstrtobool(s, &res) && !res)
		pr_warn_once("The swapaccount=0 commandline option is deprecated "
			     "in favor of configuring swap control via cgroupfs. "
			     "Please report your usecase to linux-mm@kvack.org if you "
			     "depend on this functionality.\n");
	return 1;
}
__setup("swapaccount=", setup_swap_account);

static u64 swap_current_read(struct cgroup_subsys_state *css,
			     struct cftype *cft)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);

	return (u64)page_counter_read(&memcg->swap) * PAGE_SIZE;
}

static int swap_peak_show(struct seq_file *sf, void *v)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(sf));

	return peak_show(sf, v, &memcg->swap);
}

static ssize_t swap_peak_write(struct kernfs_open_file *of, char *buf,
			       size_t nbytes, loff_t off)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));

	return peak_write(of, buf, nbytes, off, &memcg->swap,
			  &memcg->swap_peaks);
}

static int swap_high_show(struct seq_file *m, void *v)
{
	return seq_puts_memcg_tunable(m,
		READ_ONCE(mem_cgroup_from_seq(m)->swap.high));
}

static ssize_t swap_high_write(struct kernfs_open_file *of,
			       char *buf, size_t nbytes, loff_t off)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
	unsigned long high;
	int err;

	buf = strstrip(buf);
	err = page_counter_memparse(buf, "max", &high);
	if (err)
		return err;

	page_counter_set_high(&memcg->swap, high);

	return nbytes;
}

static int swap_max_show(struct seq_file *m, void *v)
{
	return seq_puts_memcg_tunable(m,
		READ_ONCE(mem_cgroup_from_seq(m)->swap.max));
}

static ssize_t swap_max_write(struct kernfs_open_file *of,
			      char *buf, size_t nbytes, loff_t off)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
	unsigned long max;
	int err;

	buf = strstrip(buf);
	err = page_counter_memparse(buf, "max", &max);
	if (err)
		return err;

	xchg(&memcg->swap.max, max);

	return nbytes;
}

static int swap_events_show(struct seq_file *m, void *v)
{
	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);

	seq_printf(m, "high %lu\n",
		   atomic_long_read(&memcg->memory_events[MEMCG_SWAP_HIGH]));
	seq_printf(m, "max %lu\n",
		   atomic_long_read(&memcg->memory_events[MEMCG_SWAP_MAX]));
	seq_printf(m, "fail %lu\n",
		   atomic_long_read(&memcg->memory_events[MEMCG_SWAP_FAIL]));

	return 0;
}

static struct cftype swap_files[] = {
	{
		.name = "swap.current",
		.flags = CFTYPE_NOT_ON_ROOT,
		.read_u64 = swap_current_read,
	},
	{
		.name = "swap.high",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = swap_high_show,
		.write = swap_high_write,
	},
	{
		.name = "swap.max",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = swap_max_show,
		.write = swap_max_write,
	},
	{
		.name = "swap.peak",
		.flags = CFTYPE_NOT_ON_ROOT,
		.open = peak_open,
		.release = peak_release,
		.seq_show = swap_peak_show,
		.write = swap_peak_write,
	},
	{
		.name = "swap.events",
		.flags = CFTYPE_NOT_ON_ROOT,
		.file_offset = offsetof(struct mem_cgroup, swap_events_file),
		.seq_show = swap_events_show,
	},
	{ }	/* terminate */
};
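
/*
 * On the default (v2) hierarchy these entries surface as "memory.swap.*"
 * files in each non-root cgroup directory. Illustrative usage, assuming
 * a cgroup2 mount at /sys/fs/cgroup and a hypothetical group "mygroup":
 *
 *	echo 512M > /sys/fs/cgroup/mygroup/memory.swap.max
 *	echo max > /sys/fs/cgroup/mygroup/memory.swap.high
 *	cat /sys/fs/cgroup/mygroup/memory.swap.current
 */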

#ifdef CONFIG_ZSWAP
/**
 * obj_cgroup_may_zswap - check if this cgroup can zswap
 * @objcg: the object cgroup
 *
 * Check if the hierarchical zswap limit has been reached.
 *
 * This doesn't check for specific headroom, and it is not atomic
 * either. But with zswap, the size of the allocation is only known
 * once compression has occurred, and this optimistic pre-check avoids
 * spending cycles on compression when there is already no room left
 * or zswap is disabled altogether somewhere in the hierarchy.
 */
bool obj_cgroup_may_zswap(struct obj_cgroup *objcg)
{
	struct mem_cgroup *memcg, *original_memcg;
	bool ret = true;

	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
		return true;

	original_memcg = get_mem_cgroup_from_objcg(objcg);
	for (memcg = original_memcg; !mem_cgroup_is_root(memcg);
	     memcg = parent_mem_cgroup(memcg)) {
		unsigned long max = READ_ONCE(memcg->zswap_max);
		unsigned long pages;

		if (max == PAGE_COUNTER_MAX)
			continue;
		if (max == 0) {
			ret = false;
			break;
		}

		/* Force flush to get accurate stats for charging */
		__mem_cgroup_flush_stats(memcg, true);
		pages = memcg_page_state(memcg, MEMCG_ZSWAP_B) / PAGE_SIZE;
		if (pages < max)
			continue;
		ret = false;
		break;
	}
	mem_cgroup_put(original_memcg);
	return ret;
}

/**
 * obj_cgroup_charge_zswap - charge compression backend memory
 * @objcg: the object cgroup
 * @size: size of compressed object
 *
 * This forces the charge after obj_cgroup_may_zswap() has allowed
 * compression and storage in zswap for this cgroup to go ahead.
 */
void obj_cgroup_charge_zswap(struct obj_cgroup *objcg, size_t size)
{
	struct mem_cgroup *memcg;

	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
		return;

	VM_WARN_ON_ONCE(!(current->flags & PF_MEMALLOC));

	/* PF_MEMALLOC context, charging must succeed */
	if (obj_cgroup_charge(objcg, GFP_KERNEL, size))
		VM_WARN_ON_ONCE(1);

	rcu_read_lock();
	memcg = obj_cgroup_memcg(objcg);
	mod_memcg_state(memcg, MEMCG_ZSWAP_B, size);
	mod_memcg_state(memcg, MEMCG_ZSWAPPED, 1);
	rcu_read_unlock();
}
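
/*
 * Illustrative pairing (simplified, not actual zswap code): a store
 * path is expected to bracket the compressed allocation like
 *
 *	if (!obj_cgroup_may_zswap(objcg))
 *		return false;			// no headroom, skip zswap
 *	len = compress(page);			// hypothetical helper
 *	obj_cgroup_charge_zswap(objcg, len);
 *
 * with obj_cgroup_uncharge_zswap() undoing the charge when the object
 * is loaded back or written out.
 */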

/**
 * obj_cgroup_uncharge_zswap - uncharge compression backend memory
 * @objcg: the object cgroup
 * @size: size of compressed object
 *
 * Uncharges zswap memory on page-in.
 */
void obj_cgroup_uncharge_zswap(struct obj_cgroup *objcg, size_t size)
{
	struct mem_cgroup *memcg;

	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
		return;

	obj_cgroup_uncharge(objcg, size);

	rcu_read_lock();
	memcg = obj_cgroup_memcg(objcg);
	mod_memcg_state(memcg, MEMCG_ZSWAP_B, -size);
	mod_memcg_state(memcg, MEMCG_ZSWAPPED, -1);
	rcu_read_unlock();
}

bool mem_cgroup_zswap_writeback_enabled(struct mem_cgroup *memcg)
{
	/* if zswap is disabled, do not block pages going to the swapping device */
	if (!zswap_is_enabled())
		return true;

	for (; memcg; memcg = parent_mem_cgroup(memcg))
		if (!READ_ONCE(memcg->zswap_writeback))
			return false;

	return true;
}
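
/*
 * Hierarchy semantics (illustrative paths): if cgroup "a" sets
 * memory.zswap.writeback=0, pages of its child "a/b" cannot be written
 * back from zswap to the backing swap device either, regardless of b's
 * own setting, because the walk above requires every ancestor to
 * permit writeback.
 */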

static u64 zswap_current_read(struct cgroup_subsys_state *css,
			      struct cftype *cft)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);

	mem_cgroup_flush_stats(memcg);
	return memcg_page_state(memcg, MEMCG_ZSWAP_B);
}

static int zswap_max_show(struct seq_file *m, void *v)
{
	return seq_puts_memcg_tunable(m,
		READ_ONCE(mem_cgroup_from_seq(m)->zswap_max));
}

static ssize_t zswap_max_write(struct kernfs_open_file *of,
			       char *buf, size_t nbytes, loff_t off)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
	unsigned long max;
	int err;

	buf = strstrip(buf);
	err = page_counter_memparse(buf, "max", &max);
	if (err)
		return err;

	xchg(&memcg->zswap_max, max);

	return nbytes;
}

static int zswap_writeback_show(struct seq_file *m, void *v)
{
	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);

	seq_printf(m, "%d\n", READ_ONCE(memcg->zswap_writeback));
	return 0;
}

static ssize_t zswap_writeback_write(struct kernfs_open_file *of,
				char *buf, size_t nbytes, loff_t off)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
	int zswap_writeback;
	ssize_t parse_ret = kstrtoint(strstrip(buf), 0, &zswap_writeback);

	if (parse_ret)
		return parse_ret;

	if (zswap_writeback != 0 && zswap_writeback != 1)
		return -EINVAL;

	WRITE_ONCE(memcg->zswap_writeback, zswap_writeback);
	return nbytes;
}

static struct cftype zswap_files[] = {
	{
		.name = "zswap.current",
		.flags = CFTYPE_NOT_ON_ROOT,
		.read_u64 = zswap_current_read,
	},
	{
		.name = "zswap.max",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = zswap_max_show,
		.write = zswap_max_write,
	},
	{
		.name = "zswap.writeback",
		.seq_show = zswap_writeback_show,
		.write = zswap_writeback_write,
	},
	{ }	/* terminate */
};
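
/*
 * Illustrative userspace view of these files (hypothetical group
 * "mygroup" under a cgroup2 mount at /sys/fs/cgroup):
 *
 *	cat /sys/fs/cgroup/mygroup/memory.zswap.current
 *	echo 64M > /sys/fs/cgroup/mygroup/memory.zswap.max
 *	echo 0 > /sys/fs/cgroup/mygroup/memory.zswap.writeback
 */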
#endif /* CONFIG_ZSWAP */

static int __init mem_cgroup_swap_init(void)
{
	if (mem_cgroup_disabled())
		return 0;

	WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys, swap_files));
#ifdef CONFIG_MEMCG_V1
	WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys, memsw_files));
#endif
#ifdef CONFIG_ZSWAP
	WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys, zswap_files));
#endif
	return 0;
}
subsys_initcall(mem_cgroup_swap_init);

#endif /* CONFIG_SWAP */