xref: /linux/mm/memcontrol.c (revision 76b6905c11fd3c6dc4562aefc3e8c4429fefae1e)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /* memcontrol.c - Memory Controller
3  *
4  * Copyright IBM Corporation, 2007
5  * Author Balbir Singh <balbir@linux.vnet.ibm.com>
6  *
7  * Copyright 2007 OpenVZ SWsoft Inc
8  * Author: Pavel Emelianov <xemul@openvz.org>
9  *
10  * Memory thresholds
11  * Copyright (C) 2009 Nokia Corporation
12  * Author: Kirill A. Shutemov
13  *
14  * Kernel Memory Controller
15  * Copyright (C) 2012 Parallels Inc. and Google Inc.
16  * Authors: Glauber Costa and Suleiman Souhlal
17  *
18  * Native page reclaim
19  * Charge lifetime sanitation
20  * Lockless page tracking & accounting
21  * Unified hierarchy configuration model
22  * Copyright (C) 2015 Red Hat, Inc., Johannes Weiner
23  *
24  * Per memcg lru locking
25  * Copyright (C) 2020 Alibaba, Inc, Alex Shi
26  */
27 
28 #include <linux/cgroup-defs.h>
29 #include <linux/page_counter.h>
30 #include <linux/memcontrol.h>
31 #include <linux/cgroup.h>
32 #include <linux/sched/mm.h>
33 #include <linux/shmem_fs.h>
34 #include <linux/hugetlb.h>
35 #include <linux/pagemap.h>
36 #include <linux/pagevec.h>
37 #include <linux/vm_event_item.h>
38 #include <linux/smp.h>
39 #include <linux/page-flags.h>
40 #include <linux/backing-dev.h>
41 #include <linux/bit_spinlock.h>
42 #include <linux/rcupdate.h>
43 #include <linux/limits.h>
44 #include <linux/export.h>
45 #include <linux/list.h>
46 #include <linux/mutex.h>
47 #include <linux/rbtree.h>
48 #include <linux/slab.h>
49 #include <linux/swapops.h>
50 #include <linux/spinlock.h>
51 #include <linux/fs.h>
52 #include <linux/seq_file.h>
53 #include <linux/parser.h>
54 #include <linux/vmpressure.h>
55 #include <linux/memremap.h>
56 #include <linux/mm_inline.h>
57 #include <linux/swap_cgroup.h>
58 #include <linux/cpu.h>
59 #include <linux/oom.h>
60 #include <linux/lockdep.h>
61 #include <linux/resume_user_mode.h>
62 #include <linux/psi.h>
63 #include <linux/seq_buf.h>
64 #include <linux/sched/isolation.h>
65 #include <linux/kmemleak.h>
66 #include "internal.h"
67 #include <net/sock.h>
68 #include <net/ip.h>
69 #include "slab.h"
70 #include "memcontrol-v1.h"
71 
72 #include <linux/uaccess.h>
73 
74 #define CREATE_TRACE_POINTS
75 #include <trace/events/memcg.h>
76 #undef CREATE_TRACE_POINTS
77 
78 #include <trace/events/vmscan.h>
79 
80 struct cgroup_subsys memory_cgrp_subsys __read_mostly;
81 EXPORT_SYMBOL(memory_cgrp_subsys);
82 
83 struct mem_cgroup *root_mem_cgroup __read_mostly;
84 
85 /* Active memory cgroup to use from an interrupt context */
86 DEFINE_PER_CPU(struct mem_cgroup *, int_active_memcg);
87 EXPORT_PER_CPU_SYMBOL_GPL(int_active_memcg);
88 
89 /* Socket memory accounting disabled? */
90 static bool cgroup_memory_nosocket __ro_after_init;
91 
92 /* Kernel memory accounting disabled? */
93 static bool cgroup_memory_nokmem __ro_after_init;
94 
95 /* BPF memory accounting disabled? */
96 static bool cgroup_memory_nobpf __ro_after_init;
97 
98 #ifdef CONFIG_CGROUP_WRITEBACK
99 static DECLARE_WAIT_QUEUE_HEAD(memcg_cgwb_frn_waitq);
100 #endif
101 
task_is_dying(void)102 static inline bool task_is_dying(void)
103 {
104 	return tsk_is_oom_victim(current) || fatal_signal_pending(current) ||
105 		(current->flags & PF_EXITING);
106 }
107 
108 /* Some nice accessors for the vmpressure. */
memcg_to_vmpressure(struct mem_cgroup * memcg)109 struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
110 {
111 	if (!memcg)
112 		memcg = root_mem_cgroup;
113 	return &memcg->vmpressure;
114 }
115 
vmpressure_to_memcg(struct vmpressure * vmpr)116 struct mem_cgroup *vmpressure_to_memcg(struct vmpressure *vmpr)
117 {
118 	return container_of(vmpr, struct mem_cgroup, vmpressure);
119 }
120 
121 #define SEQ_BUF_SIZE SZ_4K
122 #define CURRENT_OBJCG_UPDATE_BIT 0
123 #define CURRENT_OBJCG_UPDATE_FLAG (1UL << CURRENT_OBJCG_UPDATE_BIT)
124 
125 static DEFINE_SPINLOCK(objcg_lock);
126 
mem_cgroup_kmem_disabled(void)127 bool mem_cgroup_kmem_disabled(void)
128 {
129 	return cgroup_memory_nokmem;
130 }
131 
132 static void obj_cgroup_uncharge_pages(struct obj_cgroup *objcg,
133 				      unsigned int nr_pages);
134 
obj_cgroup_release(struct percpu_ref * ref)135 static void obj_cgroup_release(struct percpu_ref *ref)
136 {
137 	struct obj_cgroup *objcg = container_of(ref, struct obj_cgroup, refcnt);
138 	unsigned int nr_bytes;
139 	unsigned int nr_pages;
140 	unsigned long flags;
141 
142 	/*
143 	 * At this point all allocated objects are freed, and
144 	 * objcg->nr_charged_bytes can't have an arbitrary byte value.
145 	 * However, it can be PAGE_SIZE or (x * PAGE_SIZE).
146 	 *
147 	 * The following sequence can lead to it:
148 	 * 1) CPU0: objcg == stock->cached_objcg
149 	 * 2) CPU1: we do a small allocation (e.g. 92 bytes),
150 	 *          PAGE_SIZE bytes are charged
151 	 * 3) CPU1: a process from another memcg is allocating something,
152 	 *          the stock if flushed,
153 	 *          objcg->nr_charged_bytes = PAGE_SIZE - 92
154 	 * 5) CPU0: we do release this object,
155 	 *          92 bytes are added to stock->nr_bytes
156 	 * 6) CPU0: stock is flushed,
157 	 *          92 bytes are added to objcg->nr_charged_bytes
158 	 *
159 	 * In the result, nr_charged_bytes == PAGE_SIZE.
160 	 * This page will be uncharged in obj_cgroup_release().
161 	 */
162 	nr_bytes = atomic_read(&objcg->nr_charged_bytes);
163 	WARN_ON_ONCE(nr_bytes & (PAGE_SIZE - 1));
164 	nr_pages = nr_bytes >> PAGE_SHIFT;
165 
166 	if (nr_pages)
167 		obj_cgroup_uncharge_pages(objcg, nr_pages);
168 
169 	spin_lock_irqsave(&objcg_lock, flags);
170 	list_del(&objcg->list);
171 	spin_unlock_irqrestore(&objcg_lock, flags);
172 
173 	percpu_ref_exit(ref);
174 	kfree_rcu(objcg, rcu);
175 }
176 
obj_cgroup_alloc(void)177 static struct obj_cgroup *obj_cgroup_alloc(void)
178 {
179 	struct obj_cgroup *objcg;
180 	int ret;
181 
182 	objcg = kzalloc(sizeof(struct obj_cgroup), GFP_KERNEL);
183 	if (!objcg)
184 		return NULL;
185 
186 	ret = percpu_ref_init(&objcg->refcnt, obj_cgroup_release, 0,
187 			      GFP_KERNEL);
188 	if (ret) {
189 		kfree(objcg);
190 		return NULL;
191 	}
192 	INIT_LIST_HEAD(&objcg->list);
193 	return objcg;
194 }
195 
memcg_reparent_objcgs(struct mem_cgroup * memcg,struct mem_cgroup * parent)196 static void memcg_reparent_objcgs(struct mem_cgroup *memcg,
197 				  struct mem_cgroup *parent)
198 {
199 	struct obj_cgroup *objcg, *iter;
200 
201 	objcg = rcu_replace_pointer(memcg->objcg, NULL, true);
202 
203 	spin_lock_irq(&objcg_lock);
204 
205 	/* 1) Ready to reparent active objcg. */
206 	list_add(&objcg->list, &memcg->objcg_list);
207 	/* 2) Reparent active objcg and already reparented objcgs to parent. */
208 	list_for_each_entry(iter, &memcg->objcg_list, list)
209 		WRITE_ONCE(iter->memcg, parent);
210 	/* 3) Move already reparented objcgs to the parent's list */
211 	list_splice(&memcg->objcg_list, &parent->objcg_list);
212 
213 	spin_unlock_irq(&objcg_lock);
214 
215 	percpu_ref_kill(&objcg->refcnt);
216 }
217 
218 /*
219  * A lot of the calls to the cache allocation functions are expected to be
220  * inlined by the compiler. Since the calls to memcg_slab_post_alloc_hook() are
221  * conditional to this static branch, we'll have to allow modules that does
222  * kmem_cache_alloc and the such to see this symbol as well
223  */
224 DEFINE_STATIC_KEY_FALSE(memcg_kmem_online_key);
225 EXPORT_SYMBOL(memcg_kmem_online_key);
226 
227 DEFINE_STATIC_KEY_FALSE(memcg_bpf_enabled_key);
228 EXPORT_SYMBOL(memcg_bpf_enabled_key);
229 
230 /**
231  * mem_cgroup_css_from_folio - css of the memcg associated with a folio
232  * @folio: folio of interest
233  *
234  * If memcg is bound to the default hierarchy, css of the memcg associated
235  * with @folio is returned.  The returned css remains associated with @folio
236  * until it is released.
237  *
238  * If memcg is bound to a traditional hierarchy, the css of root_mem_cgroup
239  * is returned.
240  */
mem_cgroup_css_from_folio(struct folio * folio)241 struct cgroup_subsys_state *mem_cgroup_css_from_folio(struct folio *folio)
242 {
243 	struct mem_cgroup *memcg = folio_memcg(folio);
244 
245 	if (!memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
246 		memcg = root_mem_cgroup;
247 
248 	return &memcg->css;
249 }
250 
251 /**
252  * page_cgroup_ino - return inode number of the memcg a page is charged to
253  * @page: the page
254  *
255  * Look up the closest online ancestor of the memory cgroup @page is charged to
256  * and return its inode number or 0 if @page is not charged to any cgroup. It
257  * is safe to call this function without holding a reference to @page.
258  *
259  * Note, this function is inherently racy, because there is nothing to prevent
260  * the cgroup inode from getting torn down and potentially reallocated a moment
261  * after page_cgroup_ino() returns, so it only should be used by callers that
262  * do not care (such as procfs interfaces).
263  */
page_cgroup_ino(struct page * page)264 ino_t page_cgroup_ino(struct page *page)
265 {
266 	struct mem_cgroup *memcg;
267 	unsigned long ino = 0;
268 
269 	rcu_read_lock();
270 	/* page_folio() is racy here, but the entire function is racy anyway */
271 	memcg = folio_memcg_check(page_folio(page));
272 
273 	while (memcg && !(memcg->css.flags & CSS_ONLINE))
274 		memcg = parent_mem_cgroup(memcg);
275 	if (memcg)
276 		ino = cgroup_ino(memcg->css.cgroup);
277 	rcu_read_unlock();
278 	return ino;
279 }
280 
281 /* Subset of node_stat_item for memcg stats */
282 static const unsigned int memcg_node_stat_items[] = {
283 	NR_INACTIVE_ANON,
284 	NR_ACTIVE_ANON,
285 	NR_INACTIVE_FILE,
286 	NR_ACTIVE_FILE,
287 	NR_UNEVICTABLE,
288 	NR_SLAB_RECLAIMABLE_B,
289 	NR_SLAB_UNRECLAIMABLE_B,
290 	WORKINGSET_REFAULT_ANON,
291 	WORKINGSET_REFAULT_FILE,
292 	WORKINGSET_ACTIVATE_ANON,
293 	WORKINGSET_ACTIVATE_FILE,
294 	WORKINGSET_RESTORE_ANON,
295 	WORKINGSET_RESTORE_FILE,
296 	WORKINGSET_NODERECLAIM,
297 	NR_ANON_MAPPED,
298 	NR_FILE_MAPPED,
299 	NR_FILE_PAGES,
300 	NR_FILE_DIRTY,
301 	NR_WRITEBACK,
302 	NR_SHMEM,
303 	NR_SHMEM_THPS,
304 	NR_FILE_THPS,
305 	NR_ANON_THPS,
306 	NR_KERNEL_STACK_KB,
307 	NR_PAGETABLE,
308 	NR_SECONDARY_PAGETABLE,
309 #ifdef CONFIG_SWAP
310 	NR_SWAPCACHE,
311 #endif
312 #ifdef CONFIG_NUMA_BALANCING
313 	PGPROMOTE_SUCCESS,
314 #endif
315 	PGDEMOTE_KSWAPD,
316 	PGDEMOTE_DIRECT,
317 	PGDEMOTE_KHUGEPAGED,
318 #ifdef CONFIG_HUGETLB_PAGE
319 	NR_HUGETLB,
320 #endif
321 };
322 
323 static const unsigned int memcg_stat_items[] = {
324 	MEMCG_SWAP,
325 	MEMCG_SOCK,
326 	MEMCG_PERCPU_B,
327 	MEMCG_VMALLOC,
328 	MEMCG_KMEM,
329 	MEMCG_ZSWAP_B,
330 	MEMCG_ZSWAPPED,
331 };
332 
333 #define NR_MEMCG_NODE_STAT_ITEMS ARRAY_SIZE(memcg_node_stat_items)
334 #define MEMCG_VMSTAT_SIZE (NR_MEMCG_NODE_STAT_ITEMS + \
335 			   ARRAY_SIZE(memcg_stat_items))
336 #define BAD_STAT_IDX(index) ((u32)(index) >= U8_MAX)
337 static u8 mem_cgroup_stats_index[MEMCG_NR_STAT] __read_mostly;
338 
init_memcg_stats(void)339 static void init_memcg_stats(void)
340 {
341 	u8 i, j = 0;
342 
343 	BUILD_BUG_ON(MEMCG_NR_STAT >= U8_MAX);
344 
345 	memset(mem_cgroup_stats_index, U8_MAX, sizeof(mem_cgroup_stats_index));
346 
347 	for (i = 0; i < NR_MEMCG_NODE_STAT_ITEMS; ++i, ++j)
348 		mem_cgroup_stats_index[memcg_node_stat_items[i]] = j;
349 
350 	for (i = 0; i < ARRAY_SIZE(memcg_stat_items); ++i, ++j)
351 		mem_cgroup_stats_index[memcg_stat_items[i]] = j;
352 }
353 
memcg_stats_index(int idx)354 static inline int memcg_stats_index(int idx)
355 {
356 	return mem_cgroup_stats_index[idx];
357 }
358 
359 struct lruvec_stats_percpu {
360 	/* Local (CPU and cgroup) state */
361 	long state[NR_MEMCG_NODE_STAT_ITEMS];
362 
363 	/* Delta calculation for lockless upward propagation */
364 	long state_prev[NR_MEMCG_NODE_STAT_ITEMS];
365 };
366 
367 struct lruvec_stats {
368 	/* Aggregated (CPU and subtree) state */
369 	long state[NR_MEMCG_NODE_STAT_ITEMS];
370 
371 	/* Non-hierarchical (CPU aggregated) state */
372 	long state_local[NR_MEMCG_NODE_STAT_ITEMS];
373 
374 	/* Pending child counts during tree propagation */
375 	long state_pending[NR_MEMCG_NODE_STAT_ITEMS];
376 };
377 
lruvec_page_state(struct lruvec * lruvec,enum node_stat_item idx)378 unsigned long lruvec_page_state(struct lruvec *lruvec, enum node_stat_item idx)
379 {
380 	struct mem_cgroup_per_node *pn;
381 	long x;
382 	int i;
383 
384 	if (mem_cgroup_disabled())
385 		return node_page_state(lruvec_pgdat(lruvec), idx);
386 
387 	i = memcg_stats_index(idx);
388 	if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx))
389 		return 0;
390 
391 	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
392 	x = READ_ONCE(pn->lruvec_stats->state[i]);
393 #ifdef CONFIG_SMP
394 	if (x < 0)
395 		x = 0;
396 #endif
397 	return x;
398 }
399 
lruvec_page_state_local(struct lruvec * lruvec,enum node_stat_item idx)400 unsigned long lruvec_page_state_local(struct lruvec *lruvec,
401 				      enum node_stat_item idx)
402 {
403 	struct mem_cgroup_per_node *pn;
404 	long x;
405 	int i;
406 
407 	if (mem_cgroup_disabled())
408 		return node_page_state(lruvec_pgdat(lruvec), idx);
409 
410 	i = memcg_stats_index(idx);
411 	if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx))
412 		return 0;
413 
414 	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
415 	x = READ_ONCE(pn->lruvec_stats->state_local[i]);
416 #ifdef CONFIG_SMP
417 	if (x < 0)
418 		x = 0;
419 #endif
420 	return x;
421 }
422 
423 /* Subset of vm_event_item to report for memcg event stats */
424 static const unsigned int memcg_vm_event_stat[] = {
425 #ifdef CONFIG_MEMCG_V1
426 	PGPGIN,
427 	PGPGOUT,
428 #endif
429 	PSWPIN,
430 	PSWPOUT,
431 	PGSCAN_KSWAPD,
432 	PGSCAN_DIRECT,
433 	PGSCAN_KHUGEPAGED,
434 	PGSTEAL_KSWAPD,
435 	PGSTEAL_DIRECT,
436 	PGSTEAL_KHUGEPAGED,
437 	PGFAULT,
438 	PGMAJFAULT,
439 	PGREFILL,
440 	PGACTIVATE,
441 	PGDEACTIVATE,
442 	PGLAZYFREE,
443 	PGLAZYFREED,
444 #ifdef CONFIG_SWAP
445 	SWPIN_ZERO,
446 	SWPOUT_ZERO,
447 #endif
448 #ifdef CONFIG_ZSWAP
449 	ZSWPIN,
450 	ZSWPOUT,
451 	ZSWPWB,
452 #endif
453 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
454 	THP_FAULT_ALLOC,
455 	THP_COLLAPSE_ALLOC,
456 	THP_SWPOUT,
457 	THP_SWPOUT_FALLBACK,
458 #endif
459 #ifdef CONFIG_NUMA_BALANCING
460 	NUMA_PAGE_MIGRATE,
461 	NUMA_PTE_UPDATES,
462 	NUMA_HINT_FAULTS,
463 #endif
464 };
465 
466 #define NR_MEMCG_EVENTS ARRAY_SIZE(memcg_vm_event_stat)
467 static u8 mem_cgroup_events_index[NR_VM_EVENT_ITEMS] __read_mostly;
468 
init_memcg_events(void)469 static void init_memcg_events(void)
470 {
471 	u8 i;
472 
473 	BUILD_BUG_ON(NR_VM_EVENT_ITEMS >= U8_MAX);
474 
475 	memset(mem_cgroup_events_index, U8_MAX,
476 	       sizeof(mem_cgroup_events_index));
477 
478 	for (i = 0; i < NR_MEMCG_EVENTS; ++i)
479 		mem_cgroup_events_index[memcg_vm_event_stat[i]] = i;
480 }
481 
memcg_events_index(enum vm_event_item idx)482 static inline int memcg_events_index(enum vm_event_item idx)
483 {
484 	return mem_cgroup_events_index[idx];
485 }
486 
487 struct memcg_vmstats_percpu {
488 	/* Stats updates since the last flush */
489 	unsigned int			stats_updates;
490 
491 	/* Cached pointers for fast iteration in memcg_rstat_updated() */
492 	struct memcg_vmstats_percpu	*parent;
493 	struct memcg_vmstats		*vmstats;
494 
495 	/* The above should fit a single cacheline for memcg_rstat_updated() */
496 
497 	/* Local (CPU and cgroup) page state & events */
498 	long			state[MEMCG_VMSTAT_SIZE];
499 	unsigned long		events[NR_MEMCG_EVENTS];
500 
501 	/* Delta calculation for lockless upward propagation */
502 	long			state_prev[MEMCG_VMSTAT_SIZE];
503 	unsigned long		events_prev[NR_MEMCG_EVENTS];
504 } ____cacheline_aligned;
505 
506 struct memcg_vmstats {
507 	/* Aggregated (CPU and subtree) page state & events */
508 	long			state[MEMCG_VMSTAT_SIZE];
509 	unsigned long		events[NR_MEMCG_EVENTS];
510 
511 	/* Non-hierarchical (CPU aggregated) page state & events */
512 	long			state_local[MEMCG_VMSTAT_SIZE];
513 	unsigned long		events_local[NR_MEMCG_EVENTS];
514 
515 	/* Pending child counts during tree propagation */
516 	long			state_pending[MEMCG_VMSTAT_SIZE];
517 	unsigned long		events_pending[NR_MEMCG_EVENTS];
518 
519 	/* Stats updates since the last flush */
520 	atomic64_t		stats_updates;
521 };
522 
523 /*
524  * memcg and lruvec stats flushing
525  *
526  * Many codepaths leading to stats update or read are performance sensitive and
527  * adding stats flushing in such codepaths is not desirable. So, to optimize the
528  * flushing the kernel does:
529  *
530  * 1) Periodically and asynchronously flush the stats every 2 seconds to not let
531  *    rstat update tree grow unbounded.
532  *
533  * 2) Flush the stats synchronously on reader side only when there are more than
534  *    (MEMCG_CHARGE_BATCH * nr_cpus) update events. Though this optimization
535  *    will let stats be out of sync by atmost (MEMCG_CHARGE_BATCH * nr_cpus) but
536  *    only for 2 seconds due to (1).
537  */
538 static void flush_memcg_stats_dwork(struct work_struct *w);
539 static DECLARE_DEFERRABLE_WORK(stats_flush_dwork, flush_memcg_stats_dwork);
540 static u64 flush_last_time;
541 
542 #define FLUSH_TIME (2UL*HZ)
543 
544 /*
545  * Accessors to ensure that preemption is disabled on PREEMPT_RT because it can
546  * not rely on this as part of an acquired spinlock_t lock. These functions are
547  * never used in hardirq context on PREEMPT_RT and therefore disabling preemtion
548  * is sufficient.
549  */
memcg_stats_lock(void)550 static void memcg_stats_lock(void)
551 {
552 	preempt_disable_nested();
553 	VM_WARN_ON_IRQS_ENABLED();
554 }
555 
__memcg_stats_lock(void)556 static void __memcg_stats_lock(void)
557 {
558 	preempt_disable_nested();
559 }
560 
memcg_stats_unlock(void)561 static void memcg_stats_unlock(void)
562 {
563 	preempt_enable_nested();
564 }
565 
566 
memcg_vmstats_needs_flush(struct memcg_vmstats * vmstats)567 static bool memcg_vmstats_needs_flush(struct memcg_vmstats *vmstats)
568 {
569 	return atomic64_read(&vmstats->stats_updates) >
570 		MEMCG_CHARGE_BATCH * num_online_cpus();
571 }
572 
memcg_rstat_updated(struct mem_cgroup * memcg,int val)573 static inline void memcg_rstat_updated(struct mem_cgroup *memcg, int val)
574 {
575 	struct memcg_vmstats_percpu *statc;
576 	int cpu = smp_processor_id();
577 	unsigned int stats_updates;
578 
579 	if (!val)
580 		return;
581 
582 	cgroup_rstat_updated(memcg->css.cgroup, cpu);
583 	statc = this_cpu_ptr(memcg->vmstats_percpu);
584 	for (; statc; statc = statc->parent) {
585 		stats_updates = READ_ONCE(statc->stats_updates) + abs(val);
586 		WRITE_ONCE(statc->stats_updates, stats_updates);
587 		if (stats_updates < MEMCG_CHARGE_BATCH)
588 			continue;
589 
590 		/*
591 		 * If @memcg is already flush-able, increasing stats_updates is
592 		 * redundant. Avoid the overhead of the atomic update.
593 		 */
594 		if (!memcg_vmstats_needs_flush(statc->vmstats))
595 			atomic64_add(stats_updates,
596 				     &statc->vmstats->stats_updates);
597 		WRITE_ONCE(statc->stats_updates, 0);
598 	}
599 }
600 
__mem_cgroup_flush_stats(struct mem_cgroup * memcg,bool force)601 static void __mem_cgroup_flush_stats(struct mem_cgroup *memcg, bool force)
602 {
603 	bool needs_flush = memcg_vmstats_needs_flush(memcg->vmstats);
604 
605 	trace_memcg_flush_stats(memcg, atomic64_read(&memcg->vmstats->stats_updates),
606 		force, needs_flush);
607 
608 	if (!force && !needs_flush)
609 		return;
610 
611 	if (mem_cgroup_is_root(memcg))
612 		WRITE_ONCE(flush_last_time, jiffies_64);
613 
614 	cgroup_rstat_flush(memcg->css.cgroup);
615 }
616 
617 /*
618  * mem_cgroup_flush_stats - flush the stats of a memory cgroup subtree
619  * @memcg: root of the subtree to flush
620  *
621  * Flushing is serialized by the underlying global rstat lock. There is also a
622  * minimum amount of work to be done even if there are no stat updates to flush.
623  * Hence, we only flush the stats if the updates delta exceeds a threshold. This
624  * avoids unnecessary work and contention on the underlying lock.
625  */
mem_cgroup_flush_stats(struct mem_cgroup * memcg)626 void mem_cgroup_flush_stats(struct mem_cgroup *memcg)
627 {
628 	if (mem_cgroup_disabled())
629 		return;
630 
631 	if (!memcg)
632 		memcg = root_mem_cgroup;
633 
634 	__mem_cgroup_flush_stats(memcg, false);
635 }
636 
mem_cgroup_flush_stats_ratelimited(struct mem_cgroup * memcg)637 void mem_cgroup_flush_stats_ratelimited(struct mem_cgroup *memcg)
638 {
639 	/* Only flush if the periodic flusher is one full cycle late */
640 	if (time_after64(jiffies_64, READ_ONCE(flush_last_time) + 2*FLUSH_TIME))
641 		mem_cgroup_flush_stats(memcg);
642 }
643 
flush_memcg_stats_dwork(struct work_struct * w)644 static void flush_memcg_stats_dwork(struct work_struct *w)
645 {
646 	/*
647 	 * Deliberately ignore memcg_vmstats_needs_flush() here so that flushing
648 	 * in latency-sensitive paths is as cheap as possible.
649 	 */
650 	__mem_cgroup_flush_stats(root_mem_cgroup, true);
651 	queue_delayed_work(system_unbound_wq, &stats_flush_dwork, FLUSH_TIME);
652 }
653 
memcg_page_state(struct mem_cgroup * memcg,int idx)654 unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
655 {
656 	long x;
657 	int i = memcg_stats_index(idx);
658 
659 	if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx))
660 		return 0;
661 
662 	x = READ_ONCE(memcg->vmstats->state[i]);
663 #ifdef CONFIG_SMP
664 	if (x < 0)
665 		x = 0;
666 #endif
667 	return x;
668 }
669 
670 static int memcg_page_state_unit(int item);
671 
672 /*
673  * Normalize the value passed into memcg_rstat_updated() to be in pages. Round
674  * up non-zero sub-page updates to 1 page as zero page updates are ignored.
675  */
memcg_state_val_in_pages(int idx,int val)676 static int memcg_state_val_in_pages(int idx, int val)
677 {
678 	int unit = memcg_page_state_unit(idx);
679 
680 	if (!val || unit == PAGE_SIZE)
681 		return val;
682 	else
683 		return max(val * unit / PAGE_SIZE, 1UL);
684 }
685 
686 /**
687  * __mod_memcg_state - update cgroup memory statistics
688  * @memcg: the memory cgroup
689  * @idx: the stat item - can be enum memcg_stat_item or enum node_stat_item
690  * @val: delta to add to the counter, can be negative
691  */
__mod_memcg_state(struct mem_cgroup * memcg,enum memcg_stat_item idx,int val)692 void __mod_memcg_state(struct mem_cgroup *memcg, enum memcg_stat_item idx,
693 		       int val)
694 {
695 	int i = memcg_stats_index(idx);
696 
697 	if (mem_cgroup_disabled())
698 		return;
699 
700 	if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx))
701 		return;
702 
703 	__this_cpu_add(memcg->vmstats_percpu->state[i], val);
704 	val = memcg_state_val_in_pages(idx, val);
705 	memcg_rstat_updated(memcg, val);
706 	trace_mod_memcg_state(memcg, idx, val);
707 }
708 
709 /* idx can be of type enum memcg_stat_item or node_stat_item. */
memcg_page_state_local(struct mem_cgroup * memcg,int idx)710 unsigned long memcg_page_state_local(struct mem_cgroup *memcg, int idx)
711 {
712 	long x;
713 	int i = memcg_stats_index(idx);
714 
715 	if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx))
716 		return 0;
717 
718 	x = READ_ONCE(memcg->vmstats->state_local[i]);
719 #ifdef CONFIG_SMP
720 	if (x < 0)
721 		x = 0;
722 #endif
723 	return x;
724 }
725 
__mod_memcg_lruvec_state(struct lruvec * lruvec,enum node_stat_item idx,int val)726 static void __mod_memcg_lruvec_state(struct lruvec *lruvec,
727 				     enum node_stat_item idx,
728 				     int val)
729 {
730 	struct mem_cgroup_per_node *pn;
731 	struct mem_cgroup *memcg;
732 	int i = memcg_stats_index(idx);
733 
734 	if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx))
735 		return;
736 
737 	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
738 	memcg = pn->memcg;
739 
740 	/*
741 	 * The caller from rmap relies on disabled preemption because they never
742 	 * update their counter from in-interrupt context. For these two
743 	 * counters we check that the update is never performed from an
744 	 * interrupt context while other caller need to have disabled interrupt.
745 	 */
746 	__memcg_stats_lock();
747 	if (IS_ENABLED(CONFIG_DEBUG_VM)) {
748 		switch (idx) {
749 		case NR_ANON_MAPPED:
750 		case NR_FILE_MAPPED:
751 		case NR_ANON_THPS:
752 			WARN_ON_ONCE(!in_task());
753 			break;
754 		default:
755 			VM_WARN_ON_IRQS_ENABLED();
756 		}
757 	}
758 
759 	/* Update memcg */
760 	__this_cpu_add(memcg->vmstats_percpu->state[i], val);
761 
762 	/* Update lruvec */
763 	__this_cpu_add(pn->lruvec_stats_percpu->state[i], val);
764 
765 	val = memcg_state_val_in_pages(idx, val);
766 	memcg_rstat_updated(memcg, val);
767 	trace_mod_memcg_lruvec_state(memcg, idx, val);
768 	memcg_stats_unlock();
769 }
770 
771 /**
772  * __mod_lruvec_state - update lruvec memory statistics
773  * @lruvec: the lruvec
774  * @idx: the stat item
775  * @val: delta to add to the counter, can be negative
776  *
777  * The lruvec is the intersection of the NUMA node and a cgroup. This
778  * function updates the all three counters that are affected by a
779  * change of state at this level: per-node, per-cgroup, per-lruvec.
780  */
__mod_lruvec_state(struct lruvec * lruvec,enum node_stat_item idx,int val)781 void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
782 			int val)
783 {
784 	/* Update node */
785 	__mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
786 
787 	/* Update memcg and lruvec */
788 	if (!mem_cgroup_disabled())
789 		__mod_memcg_lruvec_state(lruvec, idx, val);
790 }
791 
__lruvec_stat_mod_folio(struct folio * folio,enum node_stat_item idx,int val)792 void __lruvec_stat_mod_folio(struct folio *folio, enum node_stat_item idx,
793 			     int val)
794 {
795 	struct mem_cgroup *memcg;
796 	pg_data_t *pgdat = folio_pgdat(folio);
797 	struct lruvec *lruvec;
798 
799 	rcu_read_lock();
800 	memcg = folio_memcg(folio);
801 	/* Untracked pages have no memcg, no lruvec. Update only the node */
802 	if (!memcg) {
803 		rcu_read_unlock();
804 		__mod_node_page_state(pgdat, idx, val);
805 		return;
806 	}
807 
808 	lruvec = mem_cgroup_lruvec(memcg, pgdat);
809 	__mod_lruvec_state(lruvec, idx, val);
810 	rcu_read_unlock();
811 }
812 EXPORT_SYMBOL(__lruvec_stat_mod_folio);
813 
__mod_lruvec_kmem_state(void * p,enum node_stat_item idx,int val)814 void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val)
815 {
816 	pg_data_t *pgdat = page_pgdat(virt_to_page(p));
817 	struct mem_cgroup *memcg;
818 	struct lruvec *lruvec;
819 
820 	rcu_read_lock();
821 	memcg = mem_cgroup_from_slab_obj(p);
822 
823 	/*
824 	 * Untracked pages have no memcg, no lruvec. Update only the
825 	 * node. If we reparent the slab objects to the root memcg,
826 	 * when we free the slab object, we need to update the per-memcg
827 	 * vmstats to keep it correct for the root memcg.
828 	 */
829 	if (!memcg) {
830 		__mod_node_page_state(pgdat, idx, val);
831 	} else {
832 		lruvec = mem_cgroup_lruvec(memcg, pgdat);
833 		__mod_lruvec_state(lruvec, idx, val);
834 	}
835 	rcu_read_unlock();
836 }
837 
838 /**
839  * __count_memcg_events - account VM events in a cgroup
840  * @memcg: the memory cgroup
841  * @idx: the event item
842  * @count: the number of events that occurred
843  */
__count_memcg_events(struct mem_cgroup * memcg,enum vm_event_item idx,unsigned long count)844 void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
845 			  unsigned long count)
846 {
847 	int i = memcg_events_index(idx);
848 
849 	if (mem_cgroup_disabled())
850 		return;
851 
852 	if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx))
853 		return;
854 
855 	memcg_stats_lock();
856 	__this_cpu_add(memcg->vmstats_percpu->events[i], count);
857 	memcg_rstat_updated(memcg, count);
858 	trace_count_memcg_events(memcg, idx, count);
859 	memcg_stats_unlock();
860 }
861 
memcg_events(struct mem_cgroup * memcg,int event)862 unsigned long memcg_events(struct mem_cgroup *memcg, int event)
863 {
864 	int i = memcg_events_index(event);
865 
866 	if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, event))
867 		return 0;
868 
869 	return READ_ONCE(memcg->vmstats->events[i]);
870 }
871 
memcg_events_local(struct mem_cgroup * memcg,int event)872 unsigned long memcg_events_local(struct mem_cgroup *memcg, int event)
873 {
874 	int i = memcg_events_index(event);
875 
876 	if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, event))
877 		return 0;
878 
879 	return READ_ONCE(memcg->vmstats->events_local[i]);
880 }
881 
mem_cgroup_from_task(struct task_struct * p)882 struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
883 {
884 	/*
885 	 * mm_update_next_owner() may clear mm->owner to NULL
886 	 * if it races with swapoff, page migration, etc.
887 	 * So this can be called with p == NULL.
888 	 */
889 	if (unlikely(!p))
890 		return NULL;
891 
892 	return mem_cgroup_from_css(task_css(p, memory_cgrp_id));
893 }
894 EXPORT_SYMBOL(mem_cgroup_from_task);
895 
active_memcg(void)896 static __always_inline struct mem_cgroup *active_memcg(void)
897 {
898 	if (!in_task())
899 		return this_cpu_read(int_active_memcg);
900 	else
901 		return current->active_memcg;
902 }
903 
904 /**
905  * get_mem_cgroup_from_mm: Obtain a reference on given mm_struct's memcg.
906  * @mm: mm from which memcg should be extracted. It can be NULL.
907  *
908  * Obtain a reference on mm->memcg and returns it if successful. If mm
909  * is NULL, then the memcg is chosen as follows:
910  * 1) The active memcg, if set.
911  * 2) current->mm->memcg, if available
912  * 3) root memcg
913  * If mem_cgroup is disabled, NULL is returned.
914  */
get_mem_cgroup_from_mm(struct mm_struct * mm)915 struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
916 {
917 	struct mem_cgroup *memcg;
918 
919 	if (mem_cgroup_disabled())
920 		return NULL;
921 
922 	/*
923 	 * Page cache insertions can happen without an
924 	 * actual mm context, e.g. during disk probing
925 	 * on boot, loopback IO, acct() writes etc.
926 	 *
927 	 * No need to css_get on root memcg as the reference
928 	 * counting is disabled on the root level in the
929 	 * cgroup core. See CSS_NO_REF.
930 	 */
931 	if (unlikely(!mm)) {
932 		memcg = active_memcg();
933 		if (unlikely(memcg)) {
934 			/* remote memcg must hold a ref */
935 			css_get(&memcg->css);
936 			return memcg;
937 		}
938 		mm = current->mm;
939 		if (unlikely(!mm))
940 			return root_mem_cgroup;
941 	}
942 
943 	rcu_read_lock();
944 	do {
945 		memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
946 		if (unlikely(!memcg))
947 			memcg = root_mem_cgroup;
948 	} while (!css_tryget(&memcg->css));
949 	rcu_read_unlock();
950 	return memcg;
951 }
952 EXPORT_SYMBOL(get_mem_cgroup_from_mm);
953 
954 /**
955  * get_mem_cgroup_from_current - Obtain a reference on current task's memcg.
956  */
get_mem_cgroup_from_current(void)957 struct mem_cgroup *get_mem_cgroup_from_current(void)
958 {
959 	struct mem_cgroup *memcg;
960 
961 	if (mem_cgroup_disabled())
962 		return NULL;
963 
964 again:
965 	rcu_read_lock();
966 	memcg = mem_cgroup_from_task(current);
967 	if (!css_tryget(&memcg->css)) {
968 		rcu_read_unlock();
969 		goto again;
970 	}
971 	rcu_read_unlock();
972 	return memcg;
973 }
974 
975 /**
976  * get_mem_cgroup_from_folio - Obtain a reference on a given folio's memcg.
977  * @folio: folio from which memcg should be extracted.
978  */
get_mem_cgroup_from_folio(struct folio * folio)979 struct mem_cgroup *get_mem_cgroup_from_folio(struct folio *folio)
980 {
981 	struct mem_cgroup *memcg = folio_memcg(folio);
982 
983 	if (mem_cgroup_disabled())
984 		return NULL;
985 
986 	rcu_read_lock();
987 	if (!memcg || WARN_ON_ONCE(!css_tryget(&memcg->css)))
988 		memcg = root_mem_cgroup;
989 	rcu_read_unlock();
990 	return memcg;
991 }
992 
993 /**
994  * mem_cgroup_iter - iterate over memory cgroup hierarchy
995  * @root: hierarchy root
996  * @prev: previously returned memcg, NULL on first invocation
997  * @reclaim: cookie for shared reclaim walks, NULL for full walks
998  *
999  * Returns references to children of the hierarchy below @root, or
1000  * @root itself, or %NULL after a full round-trip.
1001  *
1002  * Caller must pass the return value in @prev on subsequent
1003  * invocations for reference counting, or use mem_cgroup_iter_break()
1004  * to cancel a hierarchy walk before the round-trip is complete.
1005  *
1006  * Reclaimers can specify a node in @reclaim to divide up the memcgs
1007  * in the hierarchy among all concurrent reclaimers operating on the
1008  * same node.
1009  */
mem_cgroup_iter(struct mem_cgroup * root,struct mem_cgroup * prev,struct mem_cgroup_reclaim_cookie * reclaim)1010 struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
1011 				   struct mem_cgroup *prev,
1012 				   struct mem_cgroup_reclaim_cookie *reclaim)
1013 {
1014 	struct mem_cgroup_reclaim_iter *iter;
1015 	struct cgroup_subsys_state *css;
1016 	struct mem_cgroup *pos;
1017 	struct mem_cgroup *next;
1018 
1019 	if (mem_cgroup_disabled())
1020 		return NULL;
1021 
1022 	if (!root)
1023 		root = root_mem_cgroup;
1024 
1025 	rcu_read_lock();
1026 restart:
1027 	next = NULL;
1028 
1029 	if (reclaim) {
1030 		int gen;
1031 		int nid = reclaim->pgdat->node_id;
1032 
1033 		iter = &root->nodeinfo[nid]->iter;
1034 		gen = atomic_read(&iter->generation);
1035 
1036 		/*
1037 		 * On start, join the current reclaim iteration cycle.
1038 		 * Exit when a concurrent walker completes it.
1039 		 */
1040 		if (!prev)
1041 			reclaim->generation = gen;
1042 		else if (reclaim->generation != gen)
1043 			goto out_unlock;
1044 
1045 		pos = READ_ONCE(iter->position);
1046 	} else
1047 		pos = prev;
1048 
1049 	css = pos ? &pos->css : NULL;
1050 
1051 	while ((css = css_next_descendant_pre(css, &root->css))) {
1052 		/*
1053 		 * Verify the css and acquire a reference.  The root
1054 		 * is provided by the caller, so we know it's alive
1055 		 * and kicking, and don't take an extra reference.
1056 		 */
1057 		if (css == &root->css || css_tryget(css))
1058 			break;
1059 	}
1060 
1061 	next = mem_cgroup_from_css(css);
1062 
1063 	if (reclaim) {
1064 		/*
1065 		 * The position could have already been updated by a competing
1066 		 * thread, so check that the value hasn't changed since we read
1067 		 * it to avoid reclaiming from the same cgroup twice.
1068 		 */
1069 		if (cmpxchg(&iter->position, pos, next) != pos) {
1070 			if (css && css != &root->css)
1071 				css_put(css);
1072 			goto restart;
1073 		}
1074 
1075 		if (!next) {
1076 			atomic_inc(&iter->generation);
1077 
1078 			/*
1079 			 * Reclaimers share the hierarchy walk, and a
1080 			 * new one might jump in right at the end of
1081 			 * the hierarchy - make sure they see at least
1082 			 * one group and restart from the beginning.
1083 			 */
1084 			if (!prev)
1085 				goto restart;
1086 		}
1087 	}
1088 
1089 out_unlock:
1090 	rcu_read_unlock();
1091 	if (prev && prev != root)
1092 		css_put(&prev->css);
1093 
1094 	return next;
1095 }
1096 
1097 /**
1098  * mem_cgroup_iter_break - abort a hierarchy walk prematurely
1099  * @root: hierarchy root
1100  * @prev: last visited hierarchy member as returned by mem_cgroup_iter()
1101  */
mem_cgroup_iter_break(struct mem_cgroup * root,struct mem_cgroup * prev)1102 void mem_cgroup_iter_break(struct mem_cgroup *root,
1103 			   struct mem_cgroup *prev)
1104 {
1105 	if (!root)
1106 		root = root_mem_cgroup;
1107 	if (prev && prev != root)
1108 		css_put(&prev->css);
1109 }
1110 
__invalidate_reclaim_iterators(struct mem_cgroup * from,struct mem_cgroup * dead_memcg)1111 static void __invalidate_reclaim_iterators(struct mem_cgroup *from,
1112 					struct mem_cgroup *dead_memcg)
1113 {
1114 	struct mem_cgroup_reclaim_iter *iter;
1115 	struct mem_cgroup_per_node *mz;
1116 	int nid;
1117 
1118 	for_each_node(nid) {
1119 		mz = from->nodeinfo[nid];
1120 		iter = &mz->iter;
1121 		cmpxchg(&iter->position, dead_memcg, NULL);
1122 	}
1123 }
1124 
invalidate_reclaim_iterators(struct mem_cgroup * dead_memcg)1125 static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
1126 {
1127 	struct mem_cgroup *memcg = dead_memcg;
1128 	struct mem_cgroup *last;
1129 
1130 	do {
1131 		__invalidate_reclaim_iterators(memcg, dead_memcg);
1132 		last = memcg;
1133 	} while ((memcg = parent_mem_cgroup(memcg)));
1134 
1135 	/*
1136 	 * When cgroup1 non-hierarchy mode is used,
1137 	 * parent_mem_cgroup() does not walk all the way up to the
1138 	 * cgroup root (root_mem_cgroup). So we have to handle
1139 	 * dead_memcg from cgroup root separately.
1140 	 */
1141 	if (!mem_cgroup_is_root(last))
1142 		__invalidate_reclaim_iterators(root_mem_cgroup,
1143 						dead_memcg);
1144 }
1145 
1146 /**
1147  * mem_cgroup_scan_tasks - iterate over tasks of a memory cgroup hierarchy
1148  * @memcg: hierarchy root
1149  * @fn: function to call for each task
1150  * @arg: argument passed to @fn
1151  *
1152  * This function iterates over tasks attached to @memcg or to any of its
1153  * descendants and calls @fn for each task. If @fn returns a non-zero
1154  * value, the function breaks the iteration loop. Otherwise, it will iterate
1155  * over all tasks and return 0.
1156  *
1157  * This function must not be called for the root memory cgroup.
1158  */
mem_cgroup_scan_tasks(struct mem_cgroup * memcg,int (* fn)(struct task_struct *,void *),void * arg)1159 void mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
1160 			   int (*fn)(struct task_struct *, void *), void *arg)
1161 {
1162 	struct mem_cgroup *iter;
1163 	int ret = 0;
1164 	int i = 0;
1165 
1166 	BUG_ON(mem_cgroup_is_root(memcg));
1167 
1168 	for_each_mem_cgroup_tree(iter, memcg) {
1169 		struct css_task_iter it;
1170 		struct task_struct *task;
1171 
1172 		css_task_iter_start(&iter->css, CSS_TASK_ITER_PROCS, &it);
1173 		while (!ret && (task = css_task_iter_next(&it))) {
1174 			/* Avoid potential softlockup warning */
1175 			if ((++i & 1023) == 0)
1176 				cond_resched();
1177 			ret = fn(task, arg);
1178 		}
1179 		css_task_iter_end(&it);
1180 		if (ret) {
1181 			mem_cgroup_iter_break(memcg, iter);
1182 			break;
1183 		}
1184 	}
1185 }
1186 
1187 #ifdef CONFIG_DEBUG_VM
lruvec_memcg_debug(struct lruvec * lruvec,struct folio * folio)1188 void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio)
1189 {
1190 	struct mem_cgroup *memcg;
1191 
1192 	if (mem_cgroup_disabled())
1193 		return;
1194 
1195 	memcg = folio_memcg(folio);
1196 
1197 	if (!memcg)
1198 		VM_BUG_ON_FOLIO(!mem_cgroup_is_root(lruvec_memcg(lruvec)), folio);
1199 	else
1200 		VM_BUG_ON_FOLIO(lruvec_memcg(lruvec) != memcg, folio);
1201 }
1202 #endif
1203 
1204 /**
1205  * folio_lruvec_lock - Lock the lruvec for a folio.
1206  * @folio: Pointer to the folio.
1207  *
1208  * These functions are safe to use under any of the following conditions:
1209  * - folio locked
1210  * - folio_test_lru false
1211  * - folio frozen (refcount of 0)
1212  *
1213  * Return: The lruvec this folio is on with its lock held.
1214  */
folio_lruvec_lock(struct folio * folio)1215 struct lruvec *folio_lruvec_lock(struct folio *folio)
1216 {
1217 	struct lruvec *lruvec = folio_lruvec(folio);
1218 
1219 	spin_lock(&lruvec->lru_lock);
1220 	lruvec_memcg_debug(lruvec, folio);
1221 
1222 	return lruvec;
1223 }
1224 
1225 /**
1226  * folio_lruvec_lock_irq - Lock the lruvec for a folio.
1227  * @folio: Pointer to the folio.
1228  *
1229  * These functions are safe to use under any of the following conditions:
1230  * - folio locked
1231  * - folio_test_lru false
1232  * - folio frozen (refcount of 0)
1233  *
1234  * Return: The lruvec this folio is on with its lock held and interrupts
1235  * disabled.
1236  */
folio_lruvec_lock_irq(struct folio * folio)1237 struct lruvec *folio_lruvec_lock_irq(struct folio *folio)
1238 {
1239 	struct lruvec *lruvec = folio_lruvec(folio);
1240 
1241 	spin_lock_irq(&lruvec->lru_lock);
1242 	lruvec_memcg_debug(lruvec, folio);
1243 
1244 	return lruvec;
1245 }
1246 
1247 /**
1248  * folio_lruvec_lock_irqsave - Lock the lruvec for a folio.
1249  * @folio: Pointer to the folio.
1250  * @flags: Pointer to irqsave flags.
1251  *
1252  * These functions are safe to use under any of the following conditions:
1253  * - folio locked
1254  * - folio_test_lru false
1255  * - folio frozen (refcount of 0)
1256  *
1257  * Return: The lruvec this folio is on with its lock held and interrupts
1258  * disabled.
1259  */
folio_lruvec_lock_irqsave(struct folio * folio,unsigned long * flags)1260 struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio,
1261 		unsigned long *flags)
1262 {
1263 	struct lruvec *lruvec = folio_lruvec(folio);
1264 
1265 	spin_lock_irqsave(&lruvec->lru_lock, *flags);
1266 	lruvec_memcg_debug(lruvec, folio);
1267 
1268 	return lruvec;
1269 }
1270 
1271 /**
1272  * mem_cgroup_update_lru_size - account for adding or removing an lru page
1273  * @lruvec: mem_cgroup per zone lru vector
1274  * @lru: index of lru list the page is sitting on
1275  * @zid: zone id of the accounted pages
1276  * @nr_pages: positive when adding or negative when removing
1277  *
1278  * This function must be called under lru_lock, just before a page is added
1279  * to or just after a page is removed from an lru list.
1280  */
mem_cgroup_update_lru_size(struct lruvec * lruvec,enum lru_list lru,int zid,int nr_pages)1281 void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
1282 				int zid, int nr_pages)
1283 {
1284 	struct mem_cgroup_per_node *mz;
1285 	unsigned long *lru_size;
1286 	long size;
1287 
1288 	if (mem_cgroup_disabled())
1289 		return;
1290 
1291 	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
1292 	lru_size = &mz->lru_zone_size[zid][lru];
1293 
1294 	if (nr_pages < 0)
1295 		*lru_size += nr_pages;
1296 
1297 	size = *lru_size;
1298 	if (WARN_ONCE(size < 0,
1299 		"%s(%p, %d, %d): lru_size %ld\n",
1300 		__func__, lruvec, lru, nr_pages, size)) {
1301 		VM_BUG_ON(1);
1302 		*lru_size = 0;
1303 	}
1304 
1305 	if (nr_pages > 0)
1306 		*lru_size += nr_pages;
1307 }
1308 
1309 /**
1310  * mem_cgroup_margin - calculate chargeable space of a memory cgroup
1311  * @memcg: the memory cgroup
1312  *
1313  * Returns the maximum amount of memory @mem can be charged with, in
1314  * pages.
1315  */
mem_cgroup_margin(struct mem_cgroup * memcg)1316 static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
1317 {
1318 	unsigned long margin = 0;
1319 	unsigned long count;
1320 	unsigned long limit;
1321 
1322 	count = page_counter_read(&memcg->memory);
1323 	limit = READ_ONCE(memcg->memory.max);
1324 	if (count < limit)
1325 		margin = limit - count;
1326 
1327 	if (do_memsw_account()) {
1328 		count = page_counter_read(&memcg->memsw);
1329 		limit = READ_ONCE(memcg->memsw.max);
1330 		if (count < limit)
1331 			margin = min(margin, limit - count);
1332 		else
1333 			margin = 0;
1334 	}
1335 
1336 	return margin;
1337 }
1338 
1339 struct memory_stat {
1340 	const char *name;
1341 	unsigned int idx;
1342 };
1343 
1344 static const struct memory_stat memory_stats[] = {
1345 	{ "anon",			NR_ANON_MAPPED			},
1346 	{ "file",			NR_FILE_PAGES			},
1347 	{ "kernel",			MEMCG_KMEM			},
1348 	{ "kernel_stack",		NR_KERNEL_STACK_KB		},
1349 	{ "pagetables",			NR_PAGETABLE			},
1350 	{ "sec_pagetables",		NR_SECONDARY_PAGETABLE		},
1351 	{ "percpu",			MEMCG_PERCPU_B			},
1352 	{ "sock",			MEMCG_SOCK			},
1353 	{ "vmalloc",			MEMCG_VMALLOC			},
1354 	{ "shmem",			NR_SHMEM			},
1355 #ifdef CONFIG_ZSWAP
1356 	{ "zswap",			MEMCG_ZSWAP_B			},
1357 	{ "zswapped",			MEMCG_ZSWAPPED			},
1358 #endif
1359 	{ "file_mapped",		NR_FILE_MAPPED			},
1360 	{ "file_dirty",			NR_FILE_DIRTY			},
1361 	{ "file_writeback",		NR_WRITEBACK			},
1362 #ifdef CONFIG_SWAP
1363 	{ "swapcached",			NR_SWAPCACHE			},
1364 #endif
1365 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1366 	{ "anon_thp",			NR_ANON_THPS			},
1367 	{ "file_thp",			NR_FILE_THPS			},
1368 	{ "shmem_thp",			NR_SHMEM_THPS			},
1369 #endif
1370 	{ "inactive_anon",		NR_INACTIVE_ANON		},
1371 	{ "active_anon",		NR_ACTIVE_ANON			},
1372 	{ "inactive_file",		NR_INACTIVE_FILE		},
1373 	{ "active_file",		NR_ACTIVE_FILE			},
1374 	{ "unevictable",		NR_UNEVICTABLE			},
1375 	{ "slab_reclaimable",		NR_SLAB_RECLAIMABLE_B		},
1376 	{ "slab_unreclaimable",		NR_SLAB_UNRECLAIMABLE_B		},
1377 #ifdef CONFIG_HUGETLB_PAGE
1378 	{ "hugetlb",			NR_HUGETLB			},
1379 #endif
1380 
1381 	/* The memory events */
1382 	{ "workingset_refault_anon",	WORKINGSET_REFAULT_ANON		},
1383 	{ "workingset_refault_file",	WORKINGSET_REFAULT_FILE		},
1384 	{ "workingset_activate_anon",	WORKINGSET_ACTIVATE_ANON	},
1385 	{ "workingset_activate_file",	WORKINGSET_ACTIVATE_FILE	},
1386 	{ "workingset_restore_anon",	WORKINGSET_RESTORE_ANON		},
1387 	{ "workingset_restore_file",	WORKINGSET_RESTORE_FILE		},
1388 	{ "workingset_nodereclaim",	WORKINGSET_NODERECLAIM		},
1389 
1390 	{ "pgdemote_kswapd",		PGDEMOTE_KSWAPD		},
1391 	{ "pgdemote_direct",		PGDEMOTE_DIRECT		},
1392 	{ "pgdemote_khugepaged",	PGDEMOTE_KHUGEPAGED	},
1393 #ifdef CONFIG_NUMA_BALANCING
1394 	{ "pgpromote_success",		PGPROMOTE_SUCCESS	},
1395 #endif
1396 };
1397 
1398 /* The actual unit of the state item, not the same as the output unit */
memcg_page_state_unit(int item)1399 static int memcg_page_state_unit(int item)
1400 {
1401 	switch (item) {
1402 	case MEMCG_PERCPU_B:
1403 	case MEMCG_ZSWAP_B:
1404 	case NR_SLAB_RECLAIMABLE_B:
1405 	case NR_SLAB_UNRECLAIMABLE_B:
1406 		return 1;
1407 	case NR_KERNEL_STACK_KB:
1408 		return SZ_1K;
1409 	default:
1410 		return PAGE_SIZE;
1411 	}
1412 }
1413 
1414 /* Translate stat items to the correct unit for memory.stat output */
memcg_page_state_output_unit(int item)1415 static int memcg_page_state_output_unit(int item)
1416 {
1417 	/*
1418 	 * Workingset state is actually in pages, but we export it to userspace
1419 	 * as a scalar count of events, so special case it here.
1420 	 *
1421 	 * Demotion and promotion activities are exported in pages, consistent
1422 	 * with their global counterparts.
1423 	 */
1424 	switch (item) {
1425 	case WORKINGSET_REFAULT_ANON:
1426 	case WORKINGSET_REFAULT_FILE:
1427 	case WORKINGSET_ACTIVATE_ANON:
1428 	case WORKINGSET_ACTIVATE_FILE:
1429 	case WORKINGSET_RESTORE_ANON:
1430 	case WORKINGSET_RESTORE_FILE:
1431 	case WORKINGSET_NODERECLAIM:
1432 	case PGDEMOTE_KSWAPD:
1433 	case PGDEMOTE_DIRECT:
1434 	case PGDEMOTE_KHUGEPAGED:
1435 #ifdef CONFIG_NUMA_BALANCING
1436 	case PGPROMOTE_SUCCESS:
1437 #endif
1438 		return 1;
1439 	default:
1440 		return memcg_page_state_unit(item);
1441 	}
1442 }
1443 
memcg_page_state_output(struct mem_cgroup * memcg,int item)1444 unsigned long memcg_page_state_output(struct mem_cgroup *memcg, int item)
1445 {
1446 	return memcg_page_state(memcg, item) *
1447 		memcg_page_state_output_unit(item);
1448 }
1449 
memcg_page_state_local_output(struct mem_cgroup * memcg,int item)1450 unsigned long memcg_page_state_local_output(struct mem_cgroup *memcg, int item)
1451 {
1452 	return memcg_page_state_local(memcg, item) *
1453 		memcg_page_state_output_unit(item);
1454 }
1455 
1456 #ifdef CONFIG_HUGETLB_PAGE
memcg_accounts_hugetlb(void)1457 static bool memcg_accounts_hugetlb(void)
1458 {
1459 	return cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_HUGETLB_ACCOUNTING;
1460 }
1461 #else /* CONFIG_HUGETLB_PAGE */
memcg_accounts_hugetlb(void)1462 static bool memcg_accounts_hugetlb(void)
1463 {
1464 	return false;
1465 }
1466 #endif /* CONFIG_HUGETLB_PAGE */
1467 
memcg_stat_format(struct mem_cgroup * memcg,struct seq_buf * s)1468 static void memcg_stat_format(struct mem_cgroup *memcg, struct seq_buf *s)
1469 {
1470 	int i;
1471 
1472 	/*
1473 	 * Provide statistics on the state of the memory subsystem as
1474 	 * well as cumulative event counters that show past behavior.
1475 	 *
1476 	 * This list is ordered following a combination of these gradients:
1477 	 * 1) generic big picture -> specifics and details
1478 	 * 2) reflecting userspace activity -> reflecting kernel heuristics
1479 	 *
1480 	 * Current memory state:
1481 	 */
1482 	mem_cgroup_flush_stats(memcg);
1483 
1484 	for (i = 0; i < ARRAY_SIZE(memory_stats); i++) {
1485 		u64 size;
1486 
1487 #ifdef CONFIG_HUGETLB_PAGE
1488 		if (unlikely(memory_stats[i].idx == NR_HUGETLB) &&
1489 			!memcg_accounts_hugetlb())
1490 			continue;
1491 #endif
1492 		size = memcg_page_state_output(memcg, memory_stats[i].idx);
1493 		seq_buf_printf(s, "%s %llu\n", memory_stats[i].name, size);
1494 
1495 		if (unlikely(memory_stats[i].idx == NR_SLAB_UNRECLAIMABLE_B)) {
1496 			size += memcg_page_state_output(memcg,
1497 							NR_SLAB_RECLAIMABLE_B);
1498 			seq_buf_printf(s, "slab %llu\n", size);
1499 		}
1500 	}
1501 
1502 	/* Accumulated memory events */
1503 	seq_buf_printf(s, "pgscan %lu\n",
1504 		       memcg_events(memcg, PGSCAN_KSWAPD) +
1505 		       memcg_events(memcg, PGSCAN_DIRECT) +
1506 		       memcg_events(memcg, PGSCAN_KHUGEPAGED));
1507 	seq_buf_printf(s, "pgsteal %lu\n",
1508 		       memcg_events(memcg, PGSTEAL_KSWAPD) +
1509 		       memcg_events(memcg, PGSTEAL_DIRECT) +
1510 		       memcg_events(memcg, PGSTEAL_KHUGEPAGED));
1511 
1512 	for (i = 0; i < ARRAY_SIZE(memcg_vm_event_stat); i++) {
1513 #ifdef CONFIG_MEMCG_V1
1514 		if (memcg_vm_event_stat[i] == PGPGIN ||
1515 		    memcg_vm_event_stat[i] == PGPGOUT)
1516 			continue;
1517 #endif
1518 		seq_buf_printf(s, "%s %lu\n",
1519 			       vm_event_name(memcg_vm_event_stat[i]),
1520 			       memcg_events(memcg, memcg_vm_event_stat[i]));
1521 	}
1522 }
1523 
memory_stat_format(struct mem_cgroup * memcg,struct seq_buf * s)1524 static void memory_stat_format(struct mem_cgroup *memcg, struct seq_buf *s)
1525 {
1526 	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
1527 		memcg_stat_format(memcg, s);
1528 	else
1529 		memcg1_stat_format(memcg, s);
1530 	if (seq_buf_has_overflowed(s))
1531 		pr_warn("%s: Warning, stat buffer overflow, please report\n", __func__);
1532 }
1533 
1534 /**
1535  * mem_cgroup_print_oom_context: Print OOM information relevant to
1536  * memory controller.
1537  * @memcg: The memory cgroup that went over limit
1538  * @p: Task that is going to be killed
1539  *
1540  * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
1541  * enabled
1542  */
mem_cgroup_print_oom_context(struct mem_cgroup * memcg,struct task_struct * p)1543 void mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
1544 {
1545 	rcu_read_lock();
1546 
1547 	if (memcg) {
1548 		pr_cont(",oom_memcg=");
1549 		pr_cont_cgroup_path(memcg->css.cgroup);
1550 	} else
1551 		pr_cont(",global_oom");
1552 	if (p) {
1553 		pr_cont(",task_memcg=");
1554 		pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
1555 	}
1556 	rcu_read_unlock();
1557 }
1558 
1559 /**
1560  * mem_cgroup_print_oom_meminfo: Print OOM memory information relevant to
1561  * memory controller.
1562  * @memcg: The memory cgroup that went over limit
1563  */
mem_cgroup_print_oom_meminfo(struct mem_cgroup * memcg)1564 void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
1565 {
1566 	/* Use static buffer, for the caller is holding oom_lock. */
1567 	static char buf[SEQ_BUF_SIZE];
1568 	struct seq_buf s;
1569 
1570 	lockdep_assert_held(&oom_lock);
1571 
1572 	pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n",
1573 		K((u64)page_counter_read(&memcg->memory)),
1574 		K((u64)READ_ONCE(memcg->memory.max)), memcg->memory.failcnt);
1575 	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
1576 		pr_info("swap: usage %llukB, limit %llukB, failcnt %lu\n",
1577 			K((u64)page_counter_read(&memcg->swap)),
1578 			K((u64)READ_ONCE(memcg->swap.max)), memcg->swap.failcnt);
1579 #ifdef CONFIG_MEMCG_V1
1580 	else {
1581 		pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %lu\n",
1582 			K((u64)page_counter_read(&memcg->memsw)),
1583 			K((u64)memcg->memsw.max), memcg->memsw.failcnt);
1584 		pr_info("kmem: usage %llukB, limit %llukB, failcnt %lu\n",
1585 			K((u64)page_counter_read(&memcg->kmem)),
1586 			K((u64)memcg->kmem.max), memcg->kmem.failcnt);
1587 	}
1588 #endif
1589 
1590 	pr_info("Memory cgroup stats for ");
1591 	pr_cont_cgroup_path(memcg->css.cgroup);
1592 	pr_cont(":");
1593 	seq_buf_init(&s, buf, SEQ_BUF_SIZE);
1594 	memory_stat_format(memcg, &s);
1595 	seq_buf_do_printk(&s, KERN_INFO);
1596 }
1597 
1598 /*
1599  * Return the memory (and swap, if configured) limit for a memcg.
1600  */
mem_cgroup_get_max(struct mem_cgroup * memcg)1601 unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
1602 {
1603 	unsigned long max = READ_ONCE(memcg->memory.max);
1604 
1605 	if (do_memsw_account()) {
1606 		if (mem_cgroup_swappiness(memcg)) {
1607 			/* Calculate swap excess capacity from memsw limit */
1608 			unsigned long swap = READ_ONCE(memcg->memsw.max) - max;
1609 
1610 			max += min(swap, (unsigned long)total_swap_pages);
1611 		}
1612 	} else {
1613 		if (mem_cgroup_swappiness(memcg))
1614 			max += min(READ_ONCE(memcg->swap.max),
1615 				   (unsigned long)total_swap_pages);
1616 	}
1617 	return max;
1618 }
1619 
mem_cgroup_size(struct mem_cgroup * memcg)1620 unsigned long mem_cgroup_size(struct mem_cgroup *memcg)
1621 {
1622 	return page_counter_read(&memcg->memory);
1623 }
1624 
mem_cgroup_out_of_memory(struct mem_cgroup * memcg,gfp_t gfp_mask,int order)1625 static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
1626 				     int order)
1627 {
1628 	struct oom_control oc = {
1629 		.zonelist = NULL,
1630 		.nodemask = NULL,
1631 		.memcg = memcg,
1632 		.gfp_mask = gfp_mask,
1633 		.order = order,
1634 	};
1635 	bool ret = true;
1636 
1637 	if (mutex_lock_killable(&oom_lock))
1638 		return true;
1639 
1640 	if (mem_cgroup_margin(memcg) >= (1 << order))
1641 		goto unlock;
1642 
1643 	/*
1644 	 * A few threads which were not waiting at mutex_lock_killable() can
1645 	 * fail to bail out. Therefore, check again after holding oom_lock.
1646 	 */
1647 	ret = task_is_dying() || out_of_memory(&oc);
1648 
1649 unlock:
1650 	mutex_unlock(&oom_lock);
1651 	return ret;
1652 }
1653 
1654 /*
1655  * Returns true if successfully killed one or more processes. Though in some
1656  * corner cases it can return true even without killing any process.
1657  */
mem_cgroup_oom(struct mem_cgroup * memcg,gfp_t mask,int order)1658 static bool mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
1659 {
1660 	bool locked, ret;
1661 
1662 	if (order > PAGE_ALLOC_COSTLY_ORDER)
1663 		return false;
1664 
1665 	memcg_memory_event(memcg, MEMCG_OOM);
1666 
1667 	if (!memcg1_oom_prepare(memcg, &locked))
1668 		return false;
1669 
1670 	ret = mem_cgroup_out_of_memory(memcg, mask, order);
1671 
1672 	memcg1_oom_finish(memcg, locked);
1673 
1674 	return ret;
1675 }
1676 
1677 /**
1678  * mem_cgroup_get_oom_group - get a memory cgroup to clean up after OOM
1679  * @victim: task to be killed by the OOM killer
1680  * @oom_domain: memcg in case of memcg OOM, NULL in case of system-wide OOM
1681  *
1682  * Returns a pointer to a memory cgroup, which has to be cleaned up
1683  * by killing all belonging OOM-killable tasks.
1684  *
1685  * Caller has to call mem_cgroup_put() on the returned non-NULL memcg.
1686  */
mem_cgroup_get_oom_group(struct task_struct * victim,struct mem_cgroup * oom_domain)1687 struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim,
1688 					    struct mem_cgroup *oom_domain)
1689 {
1690 	struct mem_cgroup *oom_group = NULL;
1691 	struct mem_cgroup *memcg;
1692 
1693 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
1694 		return NULL;
1695 
1696 	if (!oom_domain)
1697 		oom_domain = root_mem_cgroup;
1698 
1699 	rcu_read_lock();
1700 
1701 	memcg = mem_cgroup_from_task(victim);
1702 	if (mem_cgroup_is_root(memcg))
1703 		goto out;
1704 
1705 	/*
1706 	 * If the victim task has been asynchronously moved to a different
1707 	 * memory cgroup, we might end up killing tasks outside oom_domain.
1708 	 * In this case it's better to ignore memory.group.oom.
1709 	 */
1710 	if (unlikely(!mem_cgroup_is_descendant(memcg, oom_domain)))
1711 		goto out;
1712 
1713 	/*
1714 	 * Traverse the memory cgroup hierarchy from the victim task's
1715 	 * cgroup up to the OOMing cgroup (or root) to find the
1716 	 * highest-level memory cgroup with oom.group set.
1717 	 */
1718 	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
1719 		if (READ_ONCE(memcg->oom_group))
1720 			oom_group = memcg;
1721 
1722 		if (memcg == oom_domain)
1723 			break;
1724 	}
1725 
1726 	if (oom_group)
1727 		css_get(&oom_group->css);
1728 out:
1729 	rcu_read_unlock();
1730 
1731 	return oom_group;
1732 }
1733 
mem_cgroup_print_oom_group(struct mem_cgroup * memcg)1734 void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
1735 {
1736 	pr_info("Tasks in ");
1737 	pr_cont_cgroup_path(memcg->css.cgroup);
1738 	pr_cont(" are going to be killed due to memory.oom.group set\n");
1739 }
1740 
1741 struct memcg_stock_pcp {
1742 	local_lock_t stock_lock;
1743 	struct mem_cgroup *cached; /* this never be root cgroup */
1744 	unsigned int nr_pages;
1745 
1746 	struct obj_cgroup *cached_objcg;
1747 	struct pglist_data *cached_pgdat;
1748 	unsigned int nr_bytes;
1749 	int nr_slab_reclaimable_b;
1750 	int nr_slab_unreclaimable_b;
1751 
1752 	struct work_struct work;
1753 	unsigned long flags;
1754 #define FLUSHING_CACHED_CHARGE	0
1755 };
1756 static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock) = {
1757 	.stock_lock = INIT_LOCAL_LOCK(stock_lock),
1758 };
1759 static DEFINE_MUTEX(percpu_charge_mutex);
1760 
1761 static struct obj_cgroup *drain_obj_stock(struct memcg_stock_pcp *stock);
1762 static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
1763 				     struct mem_cgroup *root_memcg);
1764 
1765 /**
1766  * consume_stock: Try to consume stocked charge on this cpu.
1767  * @memcg: memcg to consume from.
1768  * @nr_pages: how many pages to charge.
1769  *
1770  * The charges will only happen if @memcg matches the current cpu's memcg
1771  * stock, and at least @nr_pages are available in that stock.  Failure to
1772  * service an allocation will refill the stock.
1773  *
1774  * returns true if successful, false otherwise.
1775  */
consume_stock(struct mem_cgroup * memcg,unsigned int nr_pages)1776 static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
1777 {
1778 	struct memcg_stock_pcp *stock;
1779 	unsigned int stock_pages;
1780 	unsigned long flags;
1781 	bool ret = false;
1782 
1783 	if (nr_pages > MEMCG_CHARGE_BATCH)
1784 		return ret;
1785 
1786 	local_lock_irqsave(&memcg_stock.stock_lock, flags);
1787 
1788 	stock = this_cpu_ptr(&memcg_stock);
1789 	stock_pages = READ_ONCE(stock->nr_pages);
1790 	if (memcg == READ_ONCE(stock->cached) && stock_pages >= nr_pages) {
1791 		WRITE_ONCE(stock->nr_pages, stock_pages - nr_pages);
1792 		ret = true;
1793 	}
1794 
1795 	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
1796 
1797 	return ret;
1798 }
1799 
1800 /*
1801  * Returns stocks cached in percpu and reset cached information.
1802  */
drain_stock(struct memcg_stock_pcp * stock)1803 static void drain_stock(struct memcg_stock_pcp *stock)
1804 {
1805 	unsigned int stock_pages = READ_ONCE(stock->nr_pages);
1806 	struct mem_cgroup *old = READ_ONCE(stock->cached);
1807 
1808 	if (!old)
1809 		return;
1810 
1811 	if (stock_pages) {
1812 		page_counter_uncharge(&old->memory, stock_pages);
1813 		if (do_memsw_account())
1814 			page_counter_uncharge(&old->memsw, stock_pages);
1815 
1816 		WRITE_ONCE(stock->nr_pages, 0);
1817 	}
1818 
1819 	css_put(&old->css);
1820 	WRITE_ONCE(stock->cached, NULL);
1821 }
1822 
drain_local_stock(struct work_struct * dummy)1823 static void drain_local_stock(struct work_struct *dummy)
1824 {
1825 	struct memcg_stock_pcp *stock;
1826 	struct obj_cgroup *old = NULL;
1827 	unsigned long flags;
1828 
1829 	/*
1830 	 * The only protection from cpu hotplug (memcg_hotplug_cpu_dead) vs.
1831 	 * drain_stock races is that we always operate on local CPU stock
1832 	 * here with IRQ disabled
1833 	 */
1834 	local_lock_irqsave(&memcg_stock.stock_lock, flags);
1835 
1836 	stock = this_cpu_ptr(&memcg_stock);
1837 	old = drain_obj_stock(stock);
1838 	drain_stock(stock);
1839 	clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
1840 
1841 	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
1842 	obj_cgroup_put(old);
1843 }
1844 
1845 /*
1846  * Cache charges(val) to local per_cpu area.
1847  * This will be consumed by consume_stock() function, later.
1848  */
__refill_stock(struct mem_cgroup * memcg,unsigned int nr_pages)1849 static void __refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
1850 {
1851 	struct memcg_stock_pcp *stock;
1852 	unsigned int stock_pages;
1853 
1854 	stock = this_cpu_ptr(&memcg_stock);
1855 	if (READ_ONCE(stock->cached) != memcg) { /* reset if necessary */
1856 		drain_stock(stock);
1857 		css_get(&memcg->css);
1858 		WRITE_ONCE(stock->cached, memcg);
1859 	}
1860 	stock_pages = READ_ONCE(stock->nr_pages) + nr_pages;
1861 	WRITE_ONCE(stock->nr_pages, stock_pages);
1862 
1863 	if (stock_pages > MEMCG_CHARGE_BATCH)
1864 		drain_stock(stock);
1865 }
1866 
refill_stock(struct mem_cgroup * memcg,unsigned int nr_pages)1867 static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
1868 {
1869 	unsigned long flags;
1870 
1871 	local_lock_irqsave(&memcg_stock.stock_lock, flags);
1872 	__refill_stock(memcg, nr_pages);
1873 	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
1874 }
1875 
1876 /*
1877  * Drains all per-CPU charge caches for given root_memcg resp. subtree
1878  * of the hierarchy under it.
1879  */
drain_all_stock(struct mem_cgroup * root_memcg)1880 void drain_all_stock(struct mem_cgroup *root_memcg)
1881 {
1882 	int cpu, curcpu;
1883 
1884 	/* If someone's already draining, avoid adding running more workers. */
1885 	if (!mutex_trylock(&percpu_charge_mutex))
1886 		return;
1887 	/*
1888 	 * Notify other cpus that system-wide "drain" is running
1889 	 * We do not care about races with the cpu hotplug because cpu down
1890 	 * as well as workers from this path always operate on the local
1891 	 * per-cpu data. CPU up doesn't touch memcg_stock at all.
1892 	 */
1893 	migrate_disable();
1894 	curcpu = smp_processor_id();
1895 	for_each_online_cpu(cpu) {
1896 		struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
1897 		struct mem_cgroup *memcg;
1898 		bool flush = false;
1899 
1900 		rcu_read_lock();
1901 		memcg = READ_ONCE(stock->cached);
1902 		if (memcg && READ_ONCE(stock->nr_pages) &&
1903 		    mem_cgroup_is_descendant(memcg, root_memcg))
1904 			flush = true;
1905 		else if (obj_stock_flush_required(stock, root_memcg))
1906 			flush = true;
1907 		rcu_read_unlock();
1908 
1909 		if (flush &&
1910 		    !test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
1911 			if (cpu == curcpu)
1912 				drain_local_stock(&stock->work);
1913 			else if (!cpu_is_isolated(cpu))
1914 				schedule_work_on(cpu, &stock->work);
1915 		}
1916 	}
1917 	migrate_enable();
1918 	mutex_unlock(&percpu_charge_mutex);
1919 }
1920 
memcg_hotplug_cpu_dead(unsigned int cpu)1921 static int memcg_hotplug_cpu_dead(unsigned int cpu)
1922 {
1923 	struct memcg_stock_pcp *stock;
1924 	struct obj_cgroup *old;
1925 	unsigned long flags;
1926 
1927 	stock = &per_cpu(memcg_stock, cpu);
1928 
1929 	/* drain_obj_stock requires stock_lock */
1930 	local_lock_irqsave(&memcg_stock.stock_lock, flags);
1931 	old = drain_obj_stock(stock);
1932 	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
1933 
1934 	drain_stock(stock);
1935 	obj_cgroup_put(old);
1936 
1937 	return 0;
1938 }
1939 
reclaim_high(struct mem_cgroup * memcg,unsigned int nr_pages,gfp_t gfp_mask)1940 static unsigned long reclaim_high(struct mem_cgroup *memcg,
1941 				  unsigned int nr_pages,
1942 				  gfp_t gfp_mask)
1943 {
1944 	unsigned long nr_reclaimed = 0;
1945 
1946 	do {
1947 		unsigned long pflags;
1948 
1949 		if (page_counter_read(&memcg->memory) <=
1950 		    READ_ONCE(memcg->memory.high))
1951 			continue;
1952 
1953 		memcg_memory_event(memcg, MEMCG_HIGH);
1954 
1955 		psi_memstall_enter(&pflags);
1956 		nr_reclaimed += try_to_free_mem_cgroup_pages(memcg, nr_pages,
1957 							gfp_mask,
1958 							MEMCG_RECLAIM_MAY_SWAP,
1959 							NULL);
1960 		psi_memstall_leave(&pflags);
1961 	} while ((memcg = parent_mem_cgroup(memcg)) &&
1962 		 !mem_cgroup_is_root(memcg));
1963 
1964 	return nr_reclaimed;
1965 }
1966 
high_work_func(struct work_struct * work)1967 static void high_work_func(struct work_struct *work)
1968 {
1969 	struct mem_cgroup *memcg;
1970 
1971 	memcg = container_of(work, struct mem_cgroup, high_work);
1972 	reclaim_high(memcg, MEMCG_CHARGE_BATCH, GFP_KERNEL);
1973 }
1974 
1975 /*
1976  * Clamp the maximum sleep time per allocation batch to 2 seconds. This is
1977  * enough to still cause a significant slowdown in most cases, while still
1978  * allowing diagnostics and tracing to proceed without becoming stuck.
1979  */
1980 #define MEMCG_MAX_HIGH_DELAY_JIFFIES (2UL*HZ)
1981 
1982 /*
1983  * When calculating the delay, we use these either side of the exponentiation to
1984  * maintain precision and scale to a reasonable number of jiffies (see the table
1985  * below.
1986  *
1987  * - MEMCG_DELAY_PRECISION_SHIFT: Extra precision bits while translating the
1988  *   overage ratio to a delay.
1989  * - MEMCG_DELAY_SCALING_SHIFT: The number of bits to scale down the
1990  *   proposed penalty in order to reduce to a reasonable number of jiffies, and
1991  *   to produce a reasonable delay curve.
1992  *
1993  * MEMCG_DELAY_SCALING_SHIFT just happens to be a number that produces a
1994  * reasonable delay curve compared to precision-adjusted overage, not
1995  * penalising heavily at first, but still making sure that growth beyond the
1996  * limit penalises misbehaviour cgroups by slowing them down exponentially. For
1997  * example, with a high of 100 megabytes:
1998  *
1999  *  +-------+------------------------+
2000  *  | usage | time to allocate in ms |
2001  *  +-------+------------------------+
2002  *  | 100M  |                      0 |
2003  *  | 101M  |                      6 |
2004  *  | 102M  |                     25 |
2005  *  | 103M  |                     57 |
2006  *  | 104M  |                    102 |
2007  *  | 105M  |                    159 |
2008  *  | 106M  |                    230 |
2009  *  | 107M  |                    313 |
2010  *  | 108M  |                    409 |
2011  *  | 109M  |                    518 |
2012  *  | 110M  |                    639 |
2013  *  | 111M  |                    774 |
2014  *  | 112M  |                    921 |
2015  *  | 113M  |                   1081 |
2016  *  | 114M  |                   1254 |
2017  *  | 115M  |                   1439 |
2018  *  | 116M  |                   1638 |
2019  *  | 117M  |                   1849 |
2020  *  | 118M  |                   2000 |
2021  *  | 119M  |                   2000 |
2022  *  | 120M  |                   2000 |
2023  *  +-------+------------------------+
2024  */
2025  #define MEMCG_DELAY_PRECISION_SHIFT 20
2026  #define MEMCG_DELAY_SCALING_SHIFT 14
2027 
calculate_overage(unsigned long usage,unsigned long high)2028 static u64 calculate_overage(unsigned long usage, unsigned long high)
2029 {
2030 	u64 overage;
2031 
2032 	if (usage <= high)
2033 		return 0;
2034 
2035 	/*
2036 	 * Prevent division by 0 in overage calculation by acting as if
2037 	 * it was a threshold of 1 page
2038 	 */
2039 	high = max(high, 1UL);
2040 
2041 	overage = usage - high;
2042 	overage <<= MEMCG_DELAY_PRECISION_SHIFT;
2043 	return div64_u64(overage, high);
2044 }
2045 
mem_find_max_overage(struct mem_cgroup * memcg)2046 static u64 mem_find_max_overage(struct mem_cgroup *memcg)
2047 {
2048 	u64 overage, max_overage = 0;
2049 
2050 	do {
2051 		overage = calculate_overage(page_counter_read(&memcg->memory),
2052 					    READ_ONCE(memcg->memory.high));
2053 		max_overage = max(overage, max_overage);
2054 	} while ((memcg = parent_mem_cgroup(memcg)) &&
2055 		 !mem_cgroup_is_root(memcg));
2056 
2057 	return max_overage;
2058 }
2059 
swap_find_max_overage(struct mem_cgroup * memcg)2060 static u64 swap_find_max_overage(struct mem_cgroup *memcg)
2061 {
2062 	u64 overage, max_overage = 0;
2063 
2064 	do {
2065 		overage = calculate_overage(page_counter_read(&memcg->swap),
2066 					    READ_ONCE(memcg->swap.high));
2067 		if (overage)
2068 			memcg_memory_event(memcg, MEMCG_SWAP_HIGH);
2069 		max_overage = max(overage, max_overage);
2070 	} while ((memcg = parent_mem_cgroup(memcg)) &&
2071 		 !mem_cgroup_is_root(memcg));
2072 
2073 	return max_overage;
2074 }
2075 
2076 /*
2077  * Get the number of jiffies that we should penalise a mischievous cgroup which
2078  * is exceeding its memory.high by checking both it and its ancestors.
2079  */
calculate_high_delay(struct mem_cgroup * memcg,unsigned int nr_pages,u64 max_overage)2080 static unsigned long calculate_high_delay(struct mem_cgroup *memcg,
2081 					  unsigned int nr_pages,
2082 					  u64 max_overage)
2083 {
2084 	unsigned long penalty_jiffies;
2085 
2086 	if (!max_overage)
2087 		return 0;
2088 
2089 	/*
2090 	 * We use overage compared to memory.high to calculate the number of
2091 	 * jiffies to sleep (penalty_jiffies). Ideally this value should be
2092 	 * fairly lenient on small overages, and increasingly harsh when the
2093 	 * memcg in question makes it clear that it has no intention of stopping
2094 	 * its crazy behaviour, so we exponentially increase the delay based on
2095 	 * overage amount.
2096 	 */
2097 	penalty_jiffies = max_overage * max_overage * HZ;
2098 	penalty_jiffies >>= MEMCG_DELAY_PRECISION_SHIFT;
2099 	penalty_jiffies >>= MEMCG_DELAY_SCALING_SHIFT;
2100 
2101 	/*
2102 	 * Factor in the task's own contribution to the overage, such that four
2103 	 * N-sized allocations are throttled approximately the same as one
2104 	 * 4N-sized allocation.
2105 	 *
2106 	 * MEMCG_CHARGE_BATCH pages is nominal, so work out how much smaller or
2107 	 * larger the current charge patch is than that.
2108 	 */
2109 	return penalty_jiffies * nr_pages / MEMCG_CHARGE_BATCH;
2110 }
2111 
2112 /*
2113  * Reclaims memory over the high limit. Called directly from
2114  * try_charge() (context permitting), as well as from the userland
2115  * return path where reclaim is always able to block.
2116  */
mem_cgroup_handle_over_high(gfp_t gfp_mask)2117 void mem_cgroup_handle_over_high(gfp_t gfp_mask)
2118 {
2119 	unsigned long penalty_jiffies;
2120 	unsigned long pflags;
2121 	unsigned long nr_reclaimed;
2122 	unsigned int nr_pages = current->memcg_nr_pages_over_high;
2123 	int nr_retries = MAX_RECLAIM_RETRIES;
2124 	struct mem_cgroup *memcg;
2125 	bool in_retry = false;
2126 
2127 	if (likely(!nr_pages))
2128 		return;
2129 
2130 	memcg = get_mem_cgroup_from_mm(current->mm);
2131 	current->memcg_nr_pages_over_high = 0;
2132 
2133 retry_reclaim:
2134 	/*
2135 	 * Bail if the task is already exiting. Unlike memory.max,
2136 	 * memory.high enforcement isn't as strict, and there is no
2137 	 * OOM killer involved, which means the excess could already
2138 	 * be much bigger (and still growing) than it could for
2139 	 * memory.max; the dying task could get stuck in fruitless
2140 	 * reclaim for a long time, which isn't desirable.
2141 	 */
2142 	if (task_is_dying())
2143 		goto out;
2144 
2145 	/*
2146 	 * The allocating task should reclaim at least the batch size, but for
2147 	 * subsequent retries we only want to do what's necessary to prevent oom
2148 	 * or breaching resource isolation.
2149 	 *
2150 	 * This is distinct from memory.max or page allocator behaviour because
2151 	 * memory.high is currently batched, whereas memory.max and the page
2152 	 * allocator run every time an allocation is made.
2153 	 */
2154 	nr_reclaimed = reclaim_high(memcg,
2155 				    in_retry ? SWAP_CLUSTER_MAX : nr_pages,
2156 				    gfp_mask);
2157 
2158 	/*
2159 	 * memory.high is breached and reclaim is unable to keep up. Throttle
2160 	 * allocators proactively to slow down excessive growth.
2161 	 */
2162 	penalty_jiffies = calculate_high_delay(memcg, nr_pages,
2163 					       mem_find_max_overage(memcg));
2164 
2165 	penalty_jiffies += calculate_high_delay(memcg, nr_pages,
2166 						swap_find_max_overage(memcg));
2167 
2168 	/*
2169 	 * Clamp the max delay per usermode return so as to still keep the
2170 	 * application moving forwards and also permit diagnostics, albeit
2171 	 * extremely slowly.
2172 	 */
2173 	penalty_jiffies = min(penalty_jiffies, MEMCG_MAX_HIGH_DELAY_JIFFIES);
2174 
2175 	/*
2176 	 * Don't sleep if the amount of jiffies this memcg owes us is so low
2177 	 * that it's not even worth doing, in an attempt to be nice to those who
2178 	 * go only a small amount over their memory.high value and maybe haven't
2179 	 * been aggressively reclaimed enough yet.
2180 	 */
2181 	if (penalty_jiffies <= HZ / 100)
2182 		goto out;
2183 
2184 	/*
2185 	 * If reclaim is making forward progress but we're still over
2186 	 * memory.high, we want to encourage that rather than doing allocator
2187 	 * throttling.
2188 	 */
2189 	if (nr_reclaimed || nr_retries--) {
2190 		in_retry = true;
2191 		goto retry_reclaim;
2192 	}
2193 
2194 	/*
2195 	 * Reclaim didn't manage to push usage below the limit, slow
2196 	 * this allocating task down.
2197 	 *
2198 	 * If we exit early, we're guaranteed to die (since
2199 	 * schedule_timeout_killable sets TASK_KILLABLE). This means we don't
2200 	 * need to account for any ill-begotten jiffies to pay them off later.
2201 	 */
2202 	psi_memstall_enter(&pflags);
2203 	schedule_timeout_killable(penalty_jiffies);
2204 	psi_memstall_leave(&pflags);
2205 
2206 out:
2207 	css_put(&memcg->css);
2208 }
2209 
try_charge_memcg(struct mem_cgroup * memcg,gfp_t gfp_mask,unsigned int nr_pages)2210 int try_charge_memcg(struct mem_cgroup *memcg, gfp_t gfp_mask,
2211 		     unsigned int nr_pages)
2212 {
2213 	unsigned int batch = max(MEMCG_CHARGE_BATCH, nr_pages);
2214 	int nr_retries = MAX_RECLAIM_RETRIES;
2215 	struct mem_cgroup *mem_over_limit;
2216 	struct page_counter *counter;
2217 	unsigned long nr_reclaimed;
2218 	bool passed_oom = false;
2219 	unsigned int reclaim_options = MEMCG_RECLAIM_MAY_SWAP;
2220 	bool drained = false;
2221 	bool raised_max_event = false;
2222 	unsigned long pflags;
2223 
2224 retry:
2225 	if (consume_stock(memcg, nr_pages))
2226 		return 0;
2227 
2228 	if (!do_memsw_account() ||
2229 	    page_counter_try_charge(&memcg->memsw, batch, &counter)) {
2230 		if (page_counter_try_charge(&memcg->memory, batch, &counter))
2231 			goto done_restock;
2232 		if (do_memsw_account())
2233 			page_counter_uncharge(&memcg->memsw, batch);
2234 		mem_over_limit = mem_cgroup_from_counter(counter, memory);
2235 	} else {
2236 		mem_over_limit = mem_cgroup_from_counter(counter, memsw);
2237 		reclaim_options &= ~MEMCG_RECLAIM_MAY_SWAP;
2238 	}
2239 
2240 	if (batch > nr_pages) {
2241 		batch = nr_pages;
2242 		goto retry;
2243 	}
2244 
2245 	/*
2246 	 * Prevent unbounded recursion when reclaim operations need to
2247 	 * allocate memory. This might exceed the limits temporarily,
2248 	 * but we prefer facilitating memory reclaim and getting back
2249 	 * under the limit over triggering OOM kills in these cases.
2250 	 */
2251 	if (unlikely(current->flags & PF_MEMALLOC))
2252 		goto force;
2253 
2254 	if (unlikely(task_in_memcg_oom(current)))
2255 		goto nomem;
2256 
2257 	if (!gfpflags_allow_blocking(gfp_mask))
2258 		goto nomem;
2259 
2260 	memcg_memory_event(mem_over_limit, MEMCG_MAX);
2261 	raised_max_event = true;
2262 
2263 	psi_memstall_enter(&pflags);
2264 	nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages,
2265 						    gfp_mask, reclaim_options, NULL);
2266 	psi_memstall_leave(&pflags);
2267 
2268 	if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
2269 		goto retry;
2270 
2271 	if (!drained) {
2272 		drain_all_stock(mem_over_limit);
2273 		drained = true;
2274 		goto retry;
2275 	}
2276 
2277 	if (gfp_mask & __GFP_NORETRY)
2278 		goto nomem;
2279 	/*
2280 	 * Even though the limit is exceeded at this point, reclaim
2281 	 * may have been able to free some pages.  Retry the charge
2282 	 * before killing the task.
2283 	 *
2284 	 * Only for regular pages, though: huge pages are rather
2285 	 * unlikely to succeed so close to the limit, and we fall back
2286 	 * to regular pages anyway in case of failure.
2287 	 */
2288 	if (nr_reclaimed && nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER))
2289 		goto retry;
2290 
2291 	if (nr_retries--)
2292 		goto retry;
2293 
2294 	if (gfp_mask & __GFP_RETRY_MAYFAIL)
2295 		goto nomem;
2296 
2297 	/* Avoid endless loop for tasks bypassed by the oom killer */
2298 	if (passed_oom && task_is_dying())
2299 		goto nomem;
2300 
2301 	/*
2302 	 * keep retrying as long as the memcg oom killer is able to make
2303 	 * a forward progress or bypass the charge if the oom killer
2304 	 * couldn't make any progress.
2305 	 */
2306 	if (mem_cgroup_oom(mem_over_limit, gfp_mask,
2307 			   get_order(nr_pages * PAGE_SIZE))) {
2308 		passed_oom = true;
2309 		nr_retries = MAX_RECLAIM_RETRIES;
2310 		goto retry;
2311 	}
2312 nomem:
2313 	/*
2314 	 * Memcg doesn't have a dedicated reserve for atomic
2315 	 * allocations. But like the global atomic pool, we need to
2316 	 * put the burden of reclaim on regular allocation requests
2317 	 * and let these go through as privileged allocations.
2318 	 */
2319 	if (!(gfp_mask & (__GFP_NOFAIL | __GFP_HIGH)))
2320 		return -ENOMEM;
2321 force:
2322 	/*
2323 	 * If the allocation has to be enforced, don't forget to raise
2324 	 * a MEMCG_MAX event.
2325 	 */
2326 	if (!raised_max_event)
2327 		memcg_memory_event(mem_over_limit, MEMCG_MAX);
2328 
2329 	/*
2330 	 * The allocation either can't fail or will lead to more memory
2331 	 * being freed very soon.  Allow memory usage go over the limit
2332 	 * temporarily by force charging it.
2333 	 */
2334 	page_counter_charge(&memcg->memory, nr_pages);
2335 	if (do_memsw_account())
2336 		page_counter_charge(&memcg->memsw, nr_pages);
2337 
2338 	return 0;
2339 
2340 done_restock:
2341 	if (batch > nr_pages)
2342 		refill_stock(memcg, batch - nr_pages);
2343 
2344 	/*
2345 	 * If the hierarchy is above the normal consumption range, schedule
2346 	 * reclaim on returning to userland.  We can perform reclaim here
2347 	 * if __GFP_RECLAIM but let's always punt for simplicity and so that
2348 	 * GFP_KERNEL can consistently be used during reclaim.  @memcg is
2349 	 * not recorded as it most likely matches current's and won't
2350 	 * change in the meantime.  As high limit is checked again before
2351 	 * reclaim, the cost of mismatch is negligible.
2352 	 */
2353 	do {
2354 		bool mem_high, swap_high;
2355 
2356 		mem_high = page_counter_read(&memcg->memory) >
2357 			READ_ONCE(memcg->memory.high);
2358 		swap_high = page_counter_read(&memcg->swap) >
2359 			READ_ONCE(memcg->swap.high);
2360 
2361 		/* Don't bother a random interrupted task */
2362 		if (!in_task()) {
2363 			if (mem_high) {
2364 				schedule_work(&memcg->high_work);
2365 				break;
2366 			}
2367 			continue;
2368 		}
2369 
2370 		if (mem_high || swap_high) {
2371 			/*
2372 			 * The allocating tasks in this cgroup will need to do
2373 			 * reclaim or be throttled to prevent further growth
2374 			 * of the memory or swap footprints.
2375 			 *
2376 			 * Target some best-effort fairness between the tasks,
2377 			 * and distribute reclaim work and delay penalties
2378 			 * based on how much each task is actually allocating.
2379 			 */
2380 			current->memcg_nr_pages_over_high += batch;
2381 			set_notify_resume(current);
2382 			break;
2383 		}
2384 	} while ((memcg = parent_mem_cgroup(memcg)));
2385 
2386 	/*
2387 	 * Reclaim is set up above to be called from the userland
2388 	 * return path. But also attempt synchronous reclaim to avoid
2389 	 * excessive overrun while the task is still inside the
2390 	 * kernel. If this is successful, the return path will see it
2391 	 * when it rechecks the overage and simply bail out.
2392 	 */
2393 	if (current->memcg_nr_pages_over_high > MEMCG_CHARGE_BATCH &&
2394 	    !(current->flags & PF_MEMALLOC) &&
2395 	    gfpflags_allow_blocking(gfp_mask))
2396 		mem_cgroup_handle_over_high(gfp_mask);
2397 	return 0;
2398 }
2399 
commit_charge(struct folio * folio,struct mem_cgroup * memcg)2400 static void commit_charge(struct folio *folio, struct mem_cgroup *memcg)
2401 {
2402 	VM_BUG_ON_FOLIO(folio_memcg_charged(folio), folio);
2403 	/*
2404 	 * Any of the following ensures page's memcg stability:
2405 	 *
2406 	 * - the page lock
2407 	 * - LRU isolation
2408 	 * - exclusive reference
2409 	 */
2410 	folio->memcg_data = (unsigned long)memcg;
2411 }
2412 
__mod_objcg_mlstate(struct obj_cgroup * objcg,struct pglist_data * pgdat,enum node_stat_item idx,int nr)2413 static inline void __mod_objcg_mlstate(struct obj_cgroup *objcg,
2414 				       struct pglist_data *pgdat,
2415 				       enum node_stat_item idx, int nr)
2416 {
2417 	struct mem_cgroup *memcg;
2418 	struct lruvec *lruvec;
2419 
2420 	rcu_read_lock();
2421 	memcg = obj_cgroup_memcg(objcg);
2422 	lruvec = mem_cgroup_lruvec(memcg, pgdat);
2423 	__mod_memcg_lruvec_state(lruvec, idx, nr);
2424 	rcu_read_unlock();
2425 }
2426 
2427 static __always_inline
mem_cgroup_from_obj_folio(struct folio * folio,void * p)2428 struct mem_cgroup *mem_cgroup_from_obj_folio(struct folio *folio, void *p)
2429 {
2430 	/*
2431 	 * Slab objects are accounted individually, not per-page.
2432 	 * Memcg membership data for each individual object is saved in
2433 	 * slab->obj_exts.
2434 	 */
2435 	if (folio_test_slab(folio)) {
2436 		struct slabobj_ext *obj_exts;
2437 		struct slab *slab;
2438 		unsigned int off;
2439 
2440 		slab = folio_slab(folio);
2441 		obj_exts = slab_obj_exts(slab);
2442 		if (!obj_exts)
2443 			return NULL;
2444 
2445 		off = obj_to_index(slab->slab_cache, slab, p);
2446 		if (obj_exts[off].objcg)
2447 			return obj_cgroup_memcg(obj_exts[off].objcg);
2448 
2449 		return NULL;
2450 	}
2451 
2452 	/*
2453 	 * folio_memcg_check() is used here, because in theory we can encounter
2454 	 * a folio where the slab flag has been cleared already, but
2455 	 * slab->obj_exts has not been freed yet
2456 	 * folio_memcg_check() will guarantee that a proper memory
2457 	 * cgroup pointer or NULL will be returned.
2458 	 */
2459 	return folio_memcg_check(folio);
2460 }
2461 
2462 /*
2463  * Returns a pointer to the memory cgroup to which the kernel object is charged.
2464  * It is not suitable for objects allocated using vmalloc().
2465  *
2466  * A passed kernel object must be a slab object or a generic kernel page.
2467  *
2468  * The caller must ensure the memcg lifetime, e.g. by taking rcu_read_lock(),
2469  * cgroup_mutex, etc.
2470  */
mem_cgroup_from_slab_obj(void * p)2471 struct mem_cgroup *mem_cgroup_from_slab_obj(void *p)
2472 {
2473 	if (mem_cgroup_disabled())
2474 		return NULL;
2475 
2476 	return mem_cgroup_from_obj_folio(virt_to_folio(p), p);
2477 }
2478 
__get_obj_cgroup_from_memcg(struct mem_cgroup * memcg)2479 static struct obj_cgroup *__get_obj_cgroup_from_memcg(struct mem_cgroup *memcg)
2480 {
2481 	struct obj_cgroup *objcg = NULL;
2482 
2483 	for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg)) {
2484 		objcg = rcu_dereference(memcg->objcg);
2485 		if (likely(objcg && obj_cgroup_tryget(objcg)))
2486 			break;
2487 		objcg = NULL;
2488 	}
2489 	return objcg;
2490 }
2491 
current_objcg_update(void)2492 static struct obj_cgroup *current_objcg_update(void)
2493 {
2494 	struct mem_cgroup *memcg;
2495 	struct obj_cgroup *old, *objcg = NULL;
2496 
2497 	do {
2498 		/* Atomically drop the update bit. */
2499 		old = xchg(&current->objcg, NULL);
2500 		if (old) {
2501 			old = (struct obj_cgroup *)
2502 				((unsigned long)old & ~CURRENT_OBJCG_UPDATE_FLAG);
2503 			obj_cgroup_put(old);
2504 
2505 			old = NULL;
2506 		}
2507 
2508 		/* If new objcg is NULL, no reason for the second atomic update. */
2509 		if (!current->mm || (current->flags & PF_KTHREAD))
2510 			return NULL;
2511 
2512 		/*
2513 		 * Release the objcg pointer from the previous iteration,
2514 		 * if try_cmpxcg() below fails.
2515 		 */
2516 		if (unlikely(objcg)) {
2517 			obj_cgroup_put(objcg);
2518 			objcg = NULL;
2519 		}
2520 
2521 		/*
2522 		 * Obtain the new objcg pointer. The current task can be
2523 		 * asynchronously moved to another memcg and the previous
2524 		 * memcg can be offlined. So let's get the memcg pointer
2525 		 * and try get a reference to objcg under a rcu read lock.
2526 		 */
2527 
2528 		rcu_read_lock();
2529 		memcg = mem_cgroup_from_task(current);
2530 		objcg = __get_obj_cgroup_from_memcg(memcg);
2531 		rcu_read_unlock();
2532 
2533 		/*
2534 		 * Try set up a new objcg pointer atomically. If it
2535 		 * fails, it means the update flag was set concurrently, so
2536 		 * the whole procedure should be repeated.
2537 		 */
2538 	} while (!try_cmpxchg(&current->objcg, &old, objcg));
2539 
2540 	return objcg;
2541 }
2542 
current_obj_cgroup(void)2543 __always_inline struct obj_cgroup *current_obj_cgroup(void)
2544 {
2545 	struct mem_cgroup *memcg;
2546 	struct obj_cgroup *objcg;
2547 
2548 	if (in_task()) {
2549 		memcg = current->active_memcg;
2550 		if (unlikely(memcg))
2551 			goto from_memcg;
2552 
2553 		objcg = READ_ONCE(current->objcg);
2554 		if (unlikely((unsigned long)objcg & CURRENT_OBJCG_UPDATE_FLAG))
2555 			objcg = current_objcg_update();
2556 		/*
2557 		 * Objcg reference is kept by the task, so it's safe
2558 		 * to use the objcg by the current task.
2559 		 */
2560 		return objcg;
2561 	}
2562 
2563 	memcg = this_cpu_read(int_active_memcg);
2564 	if (unlikely(memcg))
2565 		goto from_memcg;
2566 
2567 	return NULL;
2568 
2569 from_memcg:
2570 	objcg = NULL;
2571 	for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg)) {
2572 		/*
2573 		 * Memcg pointer is protected by scope (see set_active_memcg())
2574 		 * and is pinning the corresponding objcg, so objcg can't go
2575 		 * away and can be used within the scope without any additional
2576 		 * protection.
2577 		 */
2578 		objcg = rcu_dereference_check(memcg->objcg, 1);
2579 		if (likely(objcg))
2580 			break;
2581 	}
2582 
2583 	return objcg;
2584 }
2585 
get_obj_cgroup_from_folio(struct folio * folio)2586 struct obj_cgroup *get_obj_cgroup_from_folio(struct folio *folio)
2587 {
2588 	struct obj_cgroup *objcg;
2589 
2590 	if (!memcg_kmem_online())
2591 		return NULL;
2592 
2593 	if (folio_memcg_kmem(folio)) {
2594 		objcg = __folio_objcg(folio);
2595 		obj_cgroup_get(objcg);
2596 	} else {
2597 		struct mem_cgroup *memcg;
2598 
2599 		rcu_read_lock();
2600 		memcg = __folio_memcg(folio);
2601 		if (memcg)
2602 			objcg = __get_obj_cgroup_from_memcg(memcg);
2603 		else
2604 			objcg = NULL;
2605 		rcu_read_unlock();
2606 	}
2607 	return objcg;
2608 }
2609 
2610 /*
2611  * obj_cgroup_uncharge_pages: uncharge a number of kernel pages from a objcg
2612  * @objcg: object cgroup to uncharge
2613  * @nr_pages: number of pages to uncharge
2614  */
obj_cgroup_uncharge_pages(struct obj_cgroup * objcg,unsigned int nr_pages)2615 static void obj_cgroup_uncharge_pages(struct obj_cgroup *objcg,
2616 				      unsigned int nr_pages)
2617 {
2618 	struct mem_cgroup *memcg;
2619 
2620 	memcg = get_mem_cgroup_from_objcg(objcg);
2621 
2622 	mod_memcg_state(memcg, MEMCG_KMEM, -nr_pages);
2623 	memcg1_account_kmem(memcg, -nr_pages);
2624 	refill_stock(memcg, nr_pages);
2625 
2626 	css_put(&memcg->css);
2627 }
2628 
2629 /*
2630  * obj_cgroup_charge_pages: charge a number of kernel pages to a objcg
2631  * @objcg: object cgroup to charge
2632  * @gfp: reclaim mode
2633  * @nr_pages: number of pages to charge
2634  *
2635  * Returns 0 on success, an error code on failure.
2636  */
obj_cgroup_charge_pages(struct obj_cgroup * objcg,gfp_t gfp,unsigned int nr_pages)2637 static int obj_cgroup_charge_pages(struct obj_cgroup *objcg, gfp_t gfp,
2638 				   unsigned int nr_pages)
2639 {
2640 	struct mem_cgroup *memcg;
2641 	int ret;
2642 
2643 	memcg = get_mem_cgroup_from_objcg(objcg);
2644 
2645 	ret = try_charge_memcg(memcg, gfp, nr_pages);
2646 	if (ret)
2647 		goto out;
2648 
2649 	mod_memcg_state(memcg, MEMCG_KMEM, nr_pages);
2650 	memcg1_account_kmem(memcg, nr_pages);
2651 out:
2652 	css_put(&memcg->css);
2653 
2654 	return ret;
2655 }
2656 
2657 /**
2658  * __memcg_kmem_charge_page: charge a kmem page to the current memory cgroup
2659  * @page: page to charge
2660  * @gfp: reclaim mode
2661  * @order: allocation order
2662  *
2663  * Returns 0 on success, an error code on failure.
2664  */
__memcg_kmem_charge_page(struct page * page,gfp_t gfp,int order)2665 int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order)
2666 {
2667 	struct obj_cgroup *objcg;
2668 	int ret = 0;
2669 
2670 	objcg = current_obj_cgroup();
2671 	if (objcg) {
2672 		ret = obj_cgroup_charge_pages(objcg, gfp, 1 << order);
2673 		if (!ret) {
2674 			obj_cgroup_get(objcg);
2675 			page->memcg_data = (unsigned long)objcg |
2676 				MEMCG_DATA_KMEM;
2677 			return 0;
2678 		}
2679 	}
2680 	return ret;
2681 }
2682 
2683 /**
2684  * __memcg_kmem_uncharge_page: uncharge a kmem page
2685  * @page: page to uncharge
2686  * @order: allocation order
2687  */
__memcg_kmem_uncharge_page(struct page * page,int order)2688 void __memcg_kmem_uncharge_page(struct page *page, int order)
2689 {
2690 	struct folio *folio = page_folio(page);
2691 	struct obj_cgroup *objcg;
2692 	unsigned int nr_pages = 1 << order;
2693 
2694 	if (!folio_memcg_kmem(folio))
2695 		return;
2696 
2697 	objcg = __folio_objcg(folio);
2698 	obj_cgroup_uncharge_pages(objcg, nr_pages);
2699 	folio->memcg_data = 0;
2700 	obj_cgroup_put(objcg);
2701 }
2702 
mod_objcg_state(struct obj_cgroup * objcg,struct pglist_data * pgdat,enum node_stat_item idx,int nr)2703 static void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat,
2704 		     enum node_stat_item idx, int nr)
2705 {
2706 	struct memcg_stock_pcp *stock;
2707 	struct obj_cgroup *old = NULL;
2708 	unsigned long flags;
2709 	int *bytes;
2710 
2711 	local_lock_irqsave(&memcg_stock.stock_lock, flags);
2712 	stock = this_cpu_ptr(&memcg_stock);
2713 
2714 	/*
2715 	 * Save vmstat data in stock and skip vmstat array update unless
2716 	 * accumulating over a page of vmstat data or when pgdat or idx
2717 	 * changes.
2718 	 */
2719 	if (READ_ONCE(stock->cached_objcg) != objcg) {
2720 		old = drain_obj_stock(stock);
2721 		obj_cgroup_get(objcg);
2722 		stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes)
2723 				? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0;
2724 		WRITE_ONCE(stock->cached_objcg, objcg);
2725 		stock->cached_pgdat = pgdat;
2726 	} else if (stock->cached_pgdat != pgdat) {
2727 		/* Flush the existing cached vmstat data */
2728 		struct pglist_data *oldpg = stock->cached_pgdat;
2729 
2730 		if (stock->nr_slab_reclaimable_b) {
2731 			__mod_objcg_mlstate(objcg, oldpg, NR_SLAB_RECLAIMABLE_B,
2732 					  stock->nr_slab_reclaimable_b);
2733 			stock->nr_slab_reclaimable_b = 0;
2734 		}
2735 		if (stock->nr_slab_unreclaimable_b) {
2736 			__mod_objcg_mlstate(objcg, oldpg, NR_SLAB_UNRECLAIMABLE_B,
2737 					  stock->nr_slab_unreclaimable_b);
2738 			stock->nr_slab_unreclaimable_b = 0;
2739 		}
2740 		stock->cached_pgdat = pgdat;
2741 	}
2742 
2743 	bytes = (idx == NR_SLAB_RECLAIMABLE_B) ? &stock->nr_slab_reclaimable_b
2744 					       : &stock->nr_slab_unreclaimable_b;
2745 	/*
2746 	 * Even for large object >= PAGE_SIZE, the vmstat data will still be
2747 	 * cached locally at least once before pushing it out.
2748 	 */
2749 	if (!*bytes) {
2750 		*bytes = nr;
2751 		nr = 0;
2752 	} else {
2753 		*bytes += nr;
2754 		if (abs(*bytes) > PAGE_SIZE) {
2755 			nr = *bytes;
2756 			*bytes = 0;
2757 		} else {
2758 			nr = 0;
2759 		}
2760 	}
2761 	if (nr)
2762 		__mod_objcg_mlstate(objcg, pgdat, idx, nr);
2763 
2764 	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
2765 	obj_cgroup_put(old);
2766 }
2767 
consume_obj_stock(struct obj_cgroup * objcg,unsigned int nr_bytes)2768 static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes)
2769 {
2770 	struct memcg_stock_pcp *stock;
2771 	unsigned long flags;
2772 	bool ret = false;
2773 
2774 	local_lock_irqsave(&memcg_stock.stock_lock, flags);
2775 
2776 	stock = this_cpu_ptr(&memcg_stock);
2777 	if (objcg == READ_ONCE(stock->cached_objcg) && stock->nr_bytes >= nr_bytes) {
2778 		stock->nr_bytes -= nr_bytes;
2779 		ret = true;
2780 	}
2781 
2782 	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
2783 
2784 	return ret;
2785 }
2786 
drain_obj_stock(struct memcg_stock_pcp * stock)2787 static struct obj_cgroup *drain_obj_stock(struct memcg_stock_pcp *stock)
2788 {
2789 	struct obj_cgroup *old = READ_ONCE(stock->cached_objcg);
2790 
2791 	if (!old)
2792 		return NULL;
2793 
2794 	if (stock->nr_bytes) {
2795 		unsigned int nr_pages = stock->nr_bytes >> PAGE_SHIFT;
2796 		unsigned int nr_bytes = stock->nr_bytes & (PAGE_SIZE - 1);
2797 
2798 		if (nr_pages) {
2799 			struct mem_cgroup *memcg;
2800 
2801 			memcg = get_mem_cgroup_from_objcg(old);
2802 
2803 			mod_memcg_state(memcg, MEMCG_KMEM, -nr_pages);
2804 			memcg1_account_kmem(memcg, -nr_pages);
2805 			__refill_stock(memcg, nr_pages);
2806 
2807 			css_put(&memcg->css);
2808 		}
2809 
2810 		/*
2811 		 * The leftover is flushed to the centralized per-memcg value.
2812 		 * On the next attempt to refill obj stock it will be moved
2813 		 * to a per-cpu stock (probably, on an other CPU), see
2814 		 * refill_obj_stock().
2815 		 *
2816 		 * How often it's flushed is a trade-off between the memory
2817 		 * limit enforcement accuracy and potential CPU contention,
2818 		 * so it might be changed in the future.
2819 		 */
2820 		atomic_add(nr_bytes, &old->nr_charged_bytes);
2821 		stock->nr_bytes = 0;
2822 	}
2823 
2824 	/*
2825 	 * Flush the vmstat data in current stock
2826 	 */
2827 	if (stock->nr_slab_reclaimable_b || stock->nr_slab_unreclaimable_b) {
2828 		if (stock->nr_slab_reclaimable_b) {
2829 			__mod_objcg_mlstate(old, stock->cached_pgdat,
2830 					  NR_SLAB_RECLAIMABLE_B,
2831 					  stock->nr_slab_reclaimable_b);
2832 			stock->nr_slab_reclaimable_b = 0;
2833 		}
2834 		if (stock->nr_slab_unreclaimable_b) {
2835 			__mod_objcg_mlstate(old, stock->cached_pgdat,
2836 					  NR_SLAB_UNRECLAIMABLE_B,
2837 					  stock->nr_slab_unreclaimable_b);
2838 			stock->nr_slab_unreclaimable_b = 0;
2839 		}
2840 		stock->cached_pgdat = NULL;
2841 	}
2842 
2843 	WRITE_ONCE(stock->cached_objcg, NULL);
2844 	/*
2845 	 * The `old' objects needs to be released by the caller via
2846 	 * obj_cgroup_put() outside of memcg_stock_pcp::stock_lock.
2847 	 */
2848 	return old;
2849 }
2850 
obj_stock_flush_required(struct memcg_stock_pcp * stock,struct mem_cgroup * root_memcg)2851 static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
2852 				     struct mem_cgroup *root_memcg)
2853 {
2854 	struct obj_cgroup *objcg = READ_ONCE(stock->cached_objcg);
2855 	struct mem_cgroup *memcg;
2856 
2857 	if (objcg) {
2858 		memcg = obj_cgroup_memcg(objcg);
2859 		if (memcg && mem_cgroup_is_descendant(memcg, root_memcg))
2860 			return true;
2861 	}
2862 
2863 	return false;
2864 }
2865 
refill_obj_stock(struct obj_cgroup * objcg,unsigned int nr_bytes,bool allow_uncharge)2866 static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes,
2867 			     bool allow_uncharge)
2868 {
2869 	struct memcg_stock_pcp *stock;
2870 	struct obj_cgroup *old = NULL;
2871 	unsigned long flags;
2872 	unsigned int nr_pages = 0;
2873 
2874 	local_lock_irqsave(&memcg_stock.stock_lock, flags);
2875 
2876 	stock = this_cpu_ptr(&memcg_stock);
2877 	if (READ_ONCE(stock->cached_objcg) != objcg) { /* reset if necessary */
2878 		old = drain_obj_stock(stock);
2879 		obj_cgroup_get(objcg);
2880 		WRITE_ONCE(stock->cached_objcg, objcg);
2881 		stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes)
2882 				? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0;
2883 		allow_uncharge = true;	/* Allow uncharge when objcg changes */
2884 	}
2885 	stock->nr_bytes += nr_bytes;
2886 
2887 	if (allow_uncharge && (stock->nr_bytes > PAGE_SIZE)) {
2888 		nr_pages = stock->nr_bytes >> PAGE_SHIFT;
2889 		stock->nr_bytes &= (PAGE_SIZE - 1);
2890 	}
2891 
2892 	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
2893 	obj_cgroup_put(old);
2894 
2895 	if (nr_pages)
2896 		obj_cgroup_uncharge_pages(objcg, nr_pages);
2897 }
2898 
obj_cgroup_charge(struct obj_cgroup * objcg,gfp_t gfp,size_t size)2899 int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size)
2900 {
2901 	unsigned int nr_pages, nr_bytes;
2902 	int ret;
2903 
2904 	if (consume_obj_stock(objcg, size))
2905 		return 0;
2906 
2907 	/*
2908 	 * In theory, objcg->nr_charged_bytes can have enough
2909 	 * pre-charged bytes to satisfy the allocation. However,
2910 	 * flushing objcg->nr_charged_bytes requires two atomic
2911 	 * operations, and objcg->nr_charged_bytes can't be big.
2912 	 * The shared objcg->nr_charged_bytes can also become a
2913 	 * performance bottleneck if all tasks of the same memcg are
2914 	 * trying to update it. So it's better to ignore it and try
2915 	 * grab some new pages. The stock's nr_bytes will be flushed to
2916 	 * objcg->nr_charged_bytes later on when objcg changes.
2917 	 *
2918 	 * The stock's nr_bytes may contain enough pre-charged bytes
2919 	 * to allow one less page from being charged, but we can't rely
2920 	 * on the pre-charged bytes not being changed outside of
2921 	 * consume_obj_stock() or refill_obj_stock(). So ignore those
2922 	 * pre-charged bytes as well when charging pages. To avoid a
2923 	 * page uncharge right after a page charge, we set the
2924 	 * allow_uncharge flag to false when calling refill_obj_stock()
2925 	 * to temporarily allow the pre-charged bytes to exceed the page
2926 	 * size limit. The maximum reachable value of the pre-charged
2927 	 * bytes is (sizeof(object) + PAGE_SIZE - 2) if there is no data
2928 	 * race.
2929 	 */
2930 	nr_pages = size >> PAGE_SHIFT;
2931 	nr_bytes = size & (PAGE_SIZE - 1);
2932 
2933 	if (nr_bytes)
2934 		nr_pages += 1;
2935 
2936 	ret = obj_cgroup_charge_pages(objcg, gfp, nr_pages);
2937 	if (!ret && nr_bytes)
2938 		refill_obj_stock(objcg, PAGE_SIZE - nr_bytes, false);
2939 
2940 	return ret;
2941 }
2942 
obj_cgroup_uncharge(struct obj_cgroup * objcg,size_t size)2943 void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size)
2944 {
2945 	refill_obj_stock(objcg, size, true);
2946 }
2947 
obj_full_size(struct kmem_cache * s)2948 static inline size_t obj_full_size(struct kmem_cache *s)
2949 {
2950 	/*
2951 	 * For each accounted object there is an extra space which is used
2952 	 * to store obj_cgroup membership. Charge it too.
2953 	 */
2954 	return s->size + sizeof(struct obj_cgroup *);
2955 }
2956 
__memcg_slab_post_alloc_hook(struct kmem_cache * s,struct list_lru * lru,gfp_t flags,size_t size,void ** p)2957 bool __memcg_slab_post_alloc_hook(struct kmem_cache *s, struct list_lru *lru,
2958 				  gfp_t flags, size_t size, void **p)
2959 {
2960 	struct obj_cgroup *objcg;
2961 	struct slab *slab;
2962 	unsigned long off;
2963 	size_t i;
2964 
2965 	/*
2966 	 * The obtained objcg pointer is safe to use within the current scope,
2967 	 * defined by current task or set_active_memcg() pair.
2968 	 * obj_cgroup_get() is used to get a permanent reference.
2969 	 */
2970 	objcg = current_obj_cgroup();
2971 	if (!objcg)
2972 		return true;
2973 
2974 	/*
2975 	 * slab_alloc_node() avoids the NULL check, so we might be called with a
2976 	 * single NULL object. kmem_cache_alloc_bulk() aborts if it can't fill
2977 	 * the whole requested size.
2978 	 * return success as there's nothing to free back
2979 	 */
2980 	if (unlikely(*p == NULL))
2981 		return true;
2982 
2983 	flags &= gfp_allowed_mask;
2984 
2985 	if (lru) {
2986 		int ret;
2987 		struct mem_cgroup *memcg;
2988 
2989 		memcg = get_mem_cgroup_from_objcg(objcg);
2990 		ret = memcg_list_lru_alloc(memcg, lru, flags);
2991 		css_put(&memcg->css);
2992 
2993 		if (ret)
2994 			return false;
2995 	}
2996 
2997 	if (obj_cgroup_charge(objcg, flags, size * obj_full_size(s)))
2998 		return false;
2999 
3000 	for (i = 0; i < size; i++) {
3001 		slab = virt_to_slab(p[i]);
3002 
3003 		if (!slab_obj_exts(slab) &&
3004 		    alloc_slab_obj_exts(slab, s, flags, false)) {
3005 			obj_cgroup_uncharge(objcg, obj_full_size(s));
3006 			continue;
3007 		}
3008 
3009 		off = obj_to_index(s, slab, p[i]);
3010 		obj_cgroup_get(objcg);
3011 		slab_obj_exts(slab)[off].objcg = objcg;
3012 		mod_objcg_state(objcg, slab_pgdat(slab),
3013 				cache_vmstat_idx(s), obj_full_size(s));
3014 	}
3015 
3016 	return true;
3017 }
3018 
__memcg_slab_free_hook(struct kmem_cache * s,struct slab * slab,void ** p,int objects,struct slabobj_ext * obj_exts)3019 void __memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
3020 			    void **p, int objects, struct slabobj_ext *obj_exts)
3021 {
3022 	for (int i = 0; i < objects; i++) {
3023 		struct obj_cgroup *objcg;
3024 		unsigned int off;
3025 
3026 		off = obj_to_index(s, slab, p[i]);
3027 		objcg = obj_exts[off].objcg;
3028 		if (!objcg)
3029 			continue;
3030 
3031 		obj_exts[off].objcg = NULL;
3032 		obj_cgroup_uncharge(objcg, obj_full_size(s));
3033 		mod_objcg_state(objcg, slab_pgdat(slab), cache_vmstat_idx(s),
3034 				-obj_full_size(s));
3035 		obj_cgroup_put(objcg);
3036 	}
3037 }
3038 
3039 /*
3040  * Because folio_memcg(head) is not set on tails, set it now.
3041  */
split_page_memcg(struct page * head,int old_order,int new_order)3042 void split_page_memcg(struct page *head, int old_order, int new_order)
3043 {
3044 	struct folio *folio = page_folio(head);
3045 	int i;
3046 	unsigned int old_nr = 1 << old_order;
3047 	unsigned int new_nr = 1 << new_order;
3048 
3049 	if (mem_cgroup_disabled() || !folio_memcg_charged(folio))
3050 		return;
3051 
3052 	for (i = new_nr; i < old_nr; i += new_nr)
3053 		folio_page(folio, i)->memcg_data = folio->memcg_data;
3054 
3055 	if (folio_memcg_kmem(folio))
3056 		obj_cgroup_get_many(__folio_objcg(folio), old_nr / new_nr - 1);
3057 	else
3058 		css_get_many(&folio_memcg(folio)->css, old_nr / new_nr - 1);
3059 }
3060 
mem_cgroup_usage(struct mem_cgroup * memcg,bool swap)3061 unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
3062 {
3063 	unsigned long val;
3064 
3065 	if (mem_cgroup_is_root(memcg)) {
3066 		/*
3067 		 * Approximate root's usage from global state. This isn't
3068 		 * perfect, but the root usage was always an approximation.
3069 		 */
3070 		val = global_node_page_state(NR_FILE_PAGES) +
3071 			global_node_page_state(NR_ANON_MAPPED);
3072 		if (swap)
3073 			val += total_swap_pages - get_nr_swap_pages();
3074 	} else {
3075 		if (!swap)
3076 			val = page_counter_read(&memcg->memory);
3077 		else
3078 			val = page_counter_read(&memcg->memsw);
3079 	}
3080 	return val;
3081 }
3082 
memcg_online_kmem(struct mem_cgroup * memcg)3083 static int memcg_online_kmem(struct mem_cgroup *memcg)
3084 {
3085 	struct obj_cgroup *objcg;
3086 
3087 	if (mem_cgroup_kmem_disabled())
3088 		return 0;
3089 
3090 	if (unlikely(mem_cgroup_is_root(memcg)))
3091 		return 0;
3092 
3093 	objcg = obj_cgroup_alloc();
3094 	if (!objcg)
3095 		return -ENOMEM;
3096 
3097 	objcg->memcg = memcg;
3098 	rcu_assign_pointer(memcg->objcg, objcg);
3099 	obj_cgroup_get(objcg);
3100 	memcg->orig_objcg = objcg;
3101 
3102 	static_branch_enable(&memcg_kmem_online_key);
3103 
3104 	memcg->kmemcg_id = memcg->id.id;
3105 
3106 	return 0;
3107 }
3108 
memcg_offline_kmem(struct mem_cgroup * memcg)3109 static void memcg_offline_kmem(struct mem_cgroup *memcg)
3110 {
3111 	struct mem_cgroup *parent;
3112 
3113 	if (mem_cgroup_kmem_disabled())
3114 		return;
3115 
3116 	if (unlikely(mem_cgroup_is_root(memcg)))
3117 		return;
3118 
3119 	parent = parent_mem_cgroup(memcg);
3120 	if (!parent)
3121 		parent = root_mem_cgroup;
3122 
3123 	memcg_reparent_list_lrus(memcg, parent);
3124 
3125 	/*
3126 	 * Objcg's reparenting must be after list_lru's, make sure list_lru
3127 	 * helpers won't use parent's list_lru until child is drained.
3128 	 */
3129 	memcg_reparent_objcgs(memcg, parent);
3130 }
3131 
3132 #ifdef CONFIG_CGROUP_WRITEBACK
3133 
3134 #include <trace/events/writeback.h>
3135 
memcg_wb_domain_init(struct mem_cgroup * memcg,gfp_t gfp)3136 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
3137 {
3138 	return wb_domain_init(&memcg->cgwb_domain, gfp);
3139 }
3140 
memcg_wb_domain_exit(struct mem_cgroup * memcg)3141 static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
3142 {
3143 	wb_domain_exit(&memcg->cgwb_domain);
3144 }
3145 
memcg_wb_domain_size_changed(struct mem_cgroup * memcg)3146 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
3147 {
3148 	wb_domain_size_changed(&memcg->cgwb_domain);
3149 }
3150 
mem_cgroup_wb_domain(struct bdi_writeback * wb)3151 struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
3152 {
3153 	struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
3154 
3155 	if (!memcg->css.parent)
3156 		return NULL;
3157 
3158 	return &memcg->cgwb_domain;
3159 }
3160 
3161 /**
3162  * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg
3163  * @wb: bdi_writeback in question
3164  * @pfilepages: out parameter for number of file pages
3165  * @pheadroom: out parameter for number of allocatable pages according to memcg
3166  * @pdirty: out parameter for number of dirty pages
3167  * @pwriteback: out parameter for number of pages under writeback
3168  *
3169  * Determine the numbers of file, headroom, dirty, and writeback pages in
3170  * @wb's memcg.  File, dirty and writeback are self-explanatory.  Headroom
3171  * is a bit more involved.
3172  *
3173  * A memcg's headroom is "min(max, high) - used".  In the hierarchy, the
3174  * headroom is calculated as the lowest headroom of itself and the
3175  * ancestors.  Note that this doesn't consider the actual amount of
3176  * available memory in the system.  The caller should further cap
3177  * *@pheadroom accordingly.
3178  */
mem_cgroup_wb_stats(struct bdi_writeback * wb,unsigned long * pfilepages,unsigned long * pheadroom,unsigned long * pdirty,unsigned long * pwriteback)3179 void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
3180 			 unsigned long *pheadroom, unsigned long *pdirty,
3181 			 unsigned long *pwriteback)
3182 {
3183 	struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
3184 	struct mem_cgroup *parent;
3185 
3186 	mem_cgroup_flush_stats_ratelimited(memcg);
3187 
3188 	*pdirty = memcg_page_state(memcg, NR_FILE_DIRTY);
3189 	*pwriteback = memcg_page_state(memcg, NR_WRITEBACK);
3190 	*pfilepages = memcg_page_state(memcg, NR_INACTIVE_FILE) +
3191 			memcg_page_state(memcg, NR_ACTIVE_FILE);
3192 
3193 	*pheadroom = PAGE_COUNTER_MAX;
3194 	while ((parent = parent_mem_cgroup(memcg))) {
3195 		unsigned long ceiling = min(READ_ONCE(memcg->memory.max),
3196 					    READ_ONCE(memcg->memory.high));
3197 		unsigned long used = page_counter_read(&memcg->memory);
3198 
3199 		*pheadroom = min(*pheadroom, ceiling - min(ceiling, used));
3200 		memcg = parent;
3201 	}
3202 }
3203 
3204 /*
3205  * Foreign dirty flushing
3206  *
3207  * There's an inherent mismatch between memcg and writeback.  The former
3208  * tracks ownership per-page while the latter per-inode.  This was a
3209  * deliberate design decision because honoring per-page ownership in the
3210  * writeback path is complicated, may lead to higher CPU and IO overheads
3211  * and deemed unnecessary given that write-sharing an inode across
3212  * different cgroups isn't a common use-case.
3213  *
3214  * Combined with inode majority-writer ownership switching, this works well
3215  * enough in most cases but there are some pathological cases.  For
3216  * example, let's say there are two cgroups A and B which keep writing to
3217  * different but confined parts of the same inode.  B owns the inode and
3218  * A's memory is limited far below B's.  A's dirty ratio can rise enough to
3219  * trigger balance_dirty_pages() sleeps but B's can be low enough to avoid
3220  * triggering background writeback.  A will be slowed down without a way to
3221  * make writeback of the dirty pages happen.
3222  *
3223  * Conditions like the above can lead to a cgroup getting repeatedly and
3224  * severely throttled after making some progress after each
3225  * dirty_expire_interval while the underlying IO device is almost
3226  * completely idle.
3227  *
3228  * Solving this problem completely requires matching the ownership tracking
3229  * granularities between memcg and writeback in either direction.  However,
3230  * the more egregious behaviors can be avoided by simply remembering the
3231  * most recent foreign dirtying events and initiating remote flushes on
3232  * them when local writeback isn't enough to keep the memory clean enough.
3233  *
3234  * The following two functions implement such mechanism.  When a foreign
3235  * page - a page whose memcg and writeback ownerships don't match - is
3236  * dirtied, mem_cgroup_track_foreign_dirty() records the inode owning
3237  * bdi_writeback on the page owning memcg.  When balance_dirty_pages()
3238  * decides that the memcg needs to sleep due to high dirty ratio, it calls
3239  * mem_cgroup_flush_foreign() which queues writeback on the recorded
3240  * foreign bdi_writebacks which haven't expired.  Both the numbers of
3241  * recorded bdi_writebacks and concurrent in-flight foreign writebacks are
3242  * limited to MEMCG_CGWB_FRN_CNT.
3243  *
3244  * The mechanism only remembers IDs and doesn't hold any object references.
3245  * As being wrong occasionally doesn't matter, updates and accesses to the
3246  * records are lockless and racy.
3247  */
mem_cgroup_track_foreign_dirty_slowpath(struct folio * folio,struct bdi_writeback * wb)3248 void mem_cgroup_track_foreign_dirty_slowpath(struct folio *folio,
3249 					     struct bdi_writeback *wb)
3250 {
3251 	struct mem_cgroup *memcg = folio_memcg(folio);
3252 	struct memcg_cgwb_frn *frn;
3253 	u64 now = get_jiffies_64();
3254 	u64 oldest_at = now;
3255 	int oldest = -1;
3256 	int i;
3257 
3258 	trace_track_foreign_dirty(folio, wb);
3259 
3260 	/*
3261 	 * Pick the slot to use.  If there is already a slot for @wb, keep
3262 	 * using it.  If not replace the oldest one which isn't being
3263 	 * written out.
3264 	 */
3265 	for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) {
3266 		frn = &memcg->cgwb_frn[i];
3267 		if (frn->bdi_id == wb->bdi->id &&
3268 		    frn->memcg_id == wb->memcg_css->id)
3269 			break;
3270 		if (time_before64(frn->at, oldest_at) &&
3271 		    atomic_read(&frn->done.cnt) == 1) {
3272 			oldest = i;
3273 			oldest_at = frn->at;
3274 		}
3275 	}
3276 
3277 	if (i < MEMCG_CGWB_FRN_CNT) {
3278 		/*
3279 		 * Re-using an existing one.  Update timestamp lazily to
3280 		 * avoid making the cacheline hot.  We want them to be
3281 		 * reasonably up-to-date and significantly shorter than
3282 		 * dirty_expire_interval as that's what expires the record.
3283 		 * Use the shorter of 1s and dirty_expire_interval / 8.
3284 		 */
3285 		unsigned long update_intv =
3286 			min_t(unsigned long, HZ,
3287 			      msecs_to_jiffies(dirty_expire_interval * 10) / 8);
3288 
3289 		if (time_before64(frn->at, now - update_intv))
3290 			frn->at = now;
3291 	} else if (oldest >= 0) {
3292 		/* replace the oldest free one */
3293 		frn = &memcg->cgwb_frn[oldest];
3294 		frn->bdi_id = wb->bdi->id;
3295 		frn->memcg_id = wb->memcg_css->id;
3296 		frn->at = now;
3297 	}
3298 }
3299 
3300 /* issue foreign writeback flushes for recorded foreign dirtying events */
mem_cgroup_flush_foreign(struct bdi_writeback * wb)3301 void mem_cgroup_flush_foreign(struct bdi_writeback *wb)
3302 {
3303 	struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
3304 	unsigned long intv = msecs_to_jiffies(dirty_expire_interval * 10);
3305 	u64 now = jiffies_64;
3306 	int i;
3307 
3308 	for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) {
3309 		struct memcg_cgwb_frn *frn = &memcg->cgwb_frn[i];
3310 
3311 		/*
3312 		 * If the record is older than dirty_expire_interval,
3313 		 * writeback on it has already started.  No need to kick it
3314 		 * off again.  Also, don't start a new one if there's
3315 		 * already one in flight.
3316 		 */
3317 		if (time_after64(frn->at, now - intv) &&
3318 		    atomic_read(&frn->done.cnt) == 1) {
3319 			frn->at = 0;
3320 			trace_flush_foreign(wb, frn->bdi_id, frn->memcg_id);
3321 			cgroup_writeback_by_id(frn->bdi_id, frn->memcg_id,
3322 					       WB_REASON_FOREIGN_FLUSH,
3323 					       &frn->done);
3324 		}
3325 	}
3326 }
3327 
3328 #else	/* CONFIG_CGROUP_WRITEBACK */
3329 
memcg_wb_domain_init(struct mem_cgroup * memcg,gfp_t gfp)3330 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
3331 {
3332 	return 0;
3333 }
3334 
memcg_wb_domain_exit(struct mem_cgroup * memcg)3335 static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
3336 {
3337 }
3338 
memcg_wb_domain_size_changed(struct mem_cgroup * memcg)3339 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
3340 {
3341 }
3342 
3343 #endif	/* CONFIG_CGROUP_WRITEBACK */
3344 
3345 /*
3346  * Private memory cgroup IDR
3347  *
3348  * Swap-out records and page cache shadow entries need to store memcg
3349  * references in constrained space, so we maintain an ID space that is
3350  * limited to 16 bit (MEM_CGROUP_ID_MAX), limiting the total number of
3351  * memory-controlled cgroups to 64k.
3352  *
3353  * However, there usually are many references to the offline CSS after
3354  * the cgroup has been destroyed, such as page cache or reclaimable
3355  * slab objects, that don't need to hang on to the ID. We want to keep
3356  * those dead CSS from occupying IDs, or we might quickly exhaust the
3357  * relatively small ID space and prevent the creation of new cgroups
3358  * even when there are much fewer than 64k cgroups - possibly none.
3359  *
3360  * Maintain a private 16-bit ID space for memcg, and allow the ID to
3361  * be freed and recycled when it's no longer needed, which is usually
3362  * when the CSS is offlined.
3363  *
3364  * The only exception to that are records of swapped out tmpfs/shmem
3365  * pages that need to be attributed to live ancestors on swapin. But
3366  * those references are manageable from userspace.
3367  */
3368 
3369 #define MEM_CGROUP_ID_MAX	((1UL << MEM_CGROUP_ID_SHIFT) - 1)
3370 static DEFINE_XARRAY_ALLOC1(mem_cgroup_ids);
3371 
mem_cgroup_id_remove(struct mem_cgroup * memcg)3372 static void mem_cgroup_id_remove(struct mem_cgroup *memcg)
3373 {
3374 	if (memcg->id.id > 0) {
3375 		xa_erase(&mem_cgroup_ids, memcg->id.id);
3376 		memcg->id.id = 0;
3377 	}
3378 }
3379 
mem_cgroup_id_get_many(struct mem_cgroup * memcg,unsigned int n)3380 void __maybe_unused mem_cgroup_id_get_many(struct mem_cgroup *memcg,
3381 					   unsigned int n)
3382 {
3383 	refcount_add(n, &memcg->id.ref);
3384 }
3385 
mem_cgroup_id_put_many(struct mem_cgroup * memcg,unsigned int n)3386 void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n)
3387 {
3388 	if (refcount_sub_and_test(n, &memcg->id.ref)) {
3389 		mem_cgroup_id_remove(memcg);
3390 
3391 		/* Memcg ID pins CSS */
3392 		css_put(&memcg->css);
3393 	}
3394 }
3395 
mem_cgroup_id_put(struct mem_cgroup * memcg)3396 static inline void mem_cgroup_id_put(struct mem_cgroup *memcg)
3397 {
3398 	mem_cgroup_id_put_many(memcg, 1);
3399 }
3400 
3401 /**
3402  * mem_cgroup_from_id - look up a memcg from a memcg id
3403  * @id: the memcg id to look up
3404  *
3405  * Caller must hold rcu_read_lock().
3406  */
mem_cgroup_from_id(unsigned short id)3407 struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
3408 {
3409 	WARN_ON_ONCE(!rcu_read_lock_held());
3410 	return xa_load(&mem_cgroup_ids, id);
3411 }
3412 
3413 #ifdef CONFIG_SHRINKER_DEBUG
mem_cgroup_get_from_ino(unsigned long ino)3414 struct mem_cgroup *mem_cgroup_get_from_ino(unsigned long ino)
3415 {
3416 	struct cgroup *cgrp;
3417 	struct cgroup_subsys_state *css;
3418 	struct mem_cgroup *memcg;
3419 
3420 	cgrp = cgroup_get_from_id(ino);
3421 	if (IS_ERR(cgrp))
3422 		return ERR_CAST(cgrp);
3423 
3424 	css = cgroup_get_e_css(cgrp, &memory_cgrp_subsys);
3425 	if (css)
3426 		memcg = container_of(css, struct mem_cgroup, css);
3427 	else
3428 		memcg = ERR_PTR(-ENOENT);
3429 
3430 	cgroup_put(cgrp);
3431 
3432 	return memcg;
3433 }
3434 #endif
3435 
alloc_mem_cgroup_per_node_info(struct mem_cgroup * memcg,int node)3436 static bool alloc_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
3437 {
3438 	struct mem_cgroup_per_node *pn;
3439 
3440 	pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, node);
3441 	if (!pn)
3442 		return false;
3443 
3444 	pn->lruvec_stats = kzalloc_node(sizeof(struct lruvec_stats),
3445 					GFP_KERNEL_ACCOUNT, node);
3446 	if (!pn->lruvec_stats)
3447 		goto fail;
3448 
3449 	pn->lruvec_stats_percpu = alloc_percpu_gfp(struct lruvec_stats_percpu,
3450 						   GFP_KERNEL_ACCOUNT);
3451 	if (!pn->lruvec_stats_percpu)
3452 		goto fail;
3453 
3454 	lruvec_init(&pn->lruvec);
3455 	pn->memcg = memcg;
3456 
3457 	memcg->nodeinfo[node] = pn;
3458 	return true;
3459 fail:
3460 	kfree(pn->lruvec_stats);
3461 	kfree(pn);
3462 	return false;
3463 }
3464 
free_mem_cgroup_per_node_info(struct mem_cgroup * memcg,int node)3465 static void free_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
3466 {
3467 	struct mem_cgroup_per_node *pn = memcg->nodeinfo[node];
3468 
3469 	if (!pn)
3470 		return;
3471 
3472 	free_percpu(pn->lruvec_stats_percpu);
3473 	kfree(pn->lruvec_stats);
3474 	kfree(pn);
3475 }
3476 
__mem_cgroup_free(struct mem_cgroup * memcg)3477 static void __mem_cgroup_free(struct mem_cgroup *memcg)
3478 {
3479 	int node;
3480 
3481 	obj_cgroup_put(memcg->orig_objcg);
3482 
3483 	for_each_node(node)
3484 		free_mem_cgroup_per_node_info(memcg, node);
3485 	memcg1_free_events(memcg);
3486 	kfree(memcg->vmstats);
3487 	free_percpu(memcg->vmstats_percpu);
3488 	kfree(memcg);
3489 }
3490 
mem_cgroup_free(struct mem_cgroup * memcg)3491 static void mem_cgroup_free(struct mem_cgroup *memcg)
3492 {
3493 	lru_gen_exit_memcg(memcg);
3494 	memcg_wb_domain_exit(memcg);
3495 	__mem_cgroup_free(memcg);
3496 }
3497 
mem_cgroup_alloc(struct mem_cgroup * parent)3498 static struct mem_cgroup *mem_cgroup_alloc(struct mem_cgroup *parent)
3499 {
3500 	struct memcg_vmstats_percpu *statc, *pstatc;
3501 	struct mem_cgroup *memcg;
3502 	int node, cpu;
3503 	int __maybe_unused i;
3504 	long error;
3505 
3506 	memcg = kzalloc(struct_size(memcg, nodeinfo, nr_node_ids), GFP_KERNEL);
3507 	if (!memcg)
3508 		return ERR_PTR(-ENOMEM);
3509 
3510 	error = xa_alloc(&mem_cgroup_ids, &memcg->id.id, NULL,
3511 			 XA_LIMIT(1, MEM_CGROUP_ID_MAX), GFP_KERNEL);
3512 	if (error)
3513 		goto fail;
3514 	error = -ENOMEM;
3515 
3516 	memcg->vmstats = kzalloc(sizeof(struct memcg_vmstats),
3517 				 GFP_KERNEL_ACCOUNT);
3518 	if (!memcg->vmstats)
3519 		goto fail;
3520 
3521 	memcg->vmstats_percpu = alloc_percpu_gfp(struct memcg_vmstats_percpu,
3522 						 GFP_KERNEL_ACCOUNT);
3523 	if (!memcg->vmstats_percpu)
3524 		goto fail;
3525 
3526 	if (!memcg1_alloc_events(memcg))
3527 		goto fail;
3528 
3529 	for_each_possible_cpu(cpu) {
3530 		if (parent)
3531 			pstatc = per_cpu_ptr(parent->vmstats_percpu, cpu);
3532 		statc = per_cpu_ptr(memcg->vmstats_percpu, cpu);
3533 		statc->parent = parent ? pstatc : NULL;
3534 		statc->vmstats = memcg->vmstats;
3535 	}
3536 
3537 	for_each_node(node)
3538 		if (!alloc_mem_cgroup_per_node_info(memcg, node))
3539 			goto fail;
3540 
3541 	if (memcg_wb_domain_init(memcg, GFP_KERNEL))
3542 		goto fail;
3543 
3544 	INIT_WORK(&memcg->high_work, high_work_func);
3545 	vmpressure_init(&memcg->vmpressure);
3546 	INIT_LIST_HEAD(&memcg->memory_peaks);
3547 	INIT_LIST_HEAD(&memcg->swap_peaks);
3548 	spin_lock_init(&memcg->peaks_lock);
3549 	memcg->socket_pressure = jiffies;
3550 	memcg1_memcg_init(memcg);
3551 	memcg->kmemcg_id = -1;
3552 	INIT_LIST_HEAD(&memcg->objcg_list);
3553 #ifdef CONFIG_CGROUP_WRITEBACK
3554 	INIT_LIST_HEAD(&memcg->cgwb_list);
3555 	for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++)
3556 		memcg->cgwb_frn[i].done =
3557 			__WB_COMPLETION_INIT(&memcg_cgwb_frn_waitq);
3558 #endif
3559 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
3560 	spin_lock_init(&memcg->deferred_split_queue.split_queue_lock);
3561 	INIT_LIST_HEAD(&memcg->deferred_split_queue.split_queue);
3562 	memcg->deferred_split_queue.split_queue_len = 0;
3563 #endif
3564 	lru_gen_init_memcg(memcg);
3565 	return memcg;
3566 fail:
3567 	mem_cgroup_id_remove(memcg);
3568 	__mem_cgroup_free(memcg);
3569 	return ERR_PTR(error);
3570 }
3571 
3572 static struct cgroup_subsys_state * __ref
mem_cgroup_css_alloc(struct cgroup_subsys_state * parent_css)3573 mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
3574 {
3575 	struct mem_cgroup *parent = mem_cgroup_from_css(parent_css);
3576 	struct mem_cgroup *memcg, *old_memcg;
3577 
3578 	old_memcg = set_active_memcg(parent);
3579 	memcg = mem_cgroup_alloc(parent);
3580 	set_active_memcg(old_memcg);
3581 	if (IS_ERR(memcg))
3582 		return ERR_CAST(memcg);
3583 
3584 	page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX);
3585 	memcg1_soft_limit_reset(memcg);
3586 #ifdef CONFIG_ZSWAP
3587 	memcg->zswap_max = PAGE_COUNTER_MAX;
3588 	WRITE_ONCE(memcg->zswap_writeback, true);
3589 #endif
3590 	page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX);
3591 	if (parent) {
3592 		WRITE_ONCE(memcg->swappiness, mem_cgroup_swappiness(parent));
3593 
3594 		page_counter_init(&memcg->memory, &parent->memory, true);
3595 		page_counter_init(&memcg->swap, &parent->swap, false);
3596 #ifdef CONFIG_MEMCG_V1
3597 		WRITE_ONCE(memcg->oom_kill_disable, READ_ONCE(parent->oom_kill_disable));
3598 		page_counter_init(&memcg->kmem, &parent->kmem, false);
3599 		page_counter_init(&memcg->tcpmem, &parent->tcpmem, false);
3600 #endif
3601 	} else {
3602 		init_memcg_stats();
3603 		init_memcg_events();
3604 		page_counter_init(&memcg->memory, NULL, true);
3605 		page_counter_init(&memcg->swap, NULL, false);
3606 #ifdef CONFIG_MEMCG_V1
3607 		page_counter_init(&memcg->kmem, NULL, false);
3608 		page_counter_init(&memcg->tcpmem, NULL, false);
3609 #endif
3610 		root_mem_cgroup = memcg;
3611 		return &memcg->css;
3612 	}
3613 
3614 	if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
3615 		static_branch_inc(&memcg_sockets_enabled_key);
3616 
3617 	if (!cgroup_memory_nobpf)
3618 		static_branch_inc(&memcg_bpf_enabled_key);
3619 
3620 	return &memcg->css;
3621 }
3622 
mem_cgroup_css_online(struct cgroup_subsys_state * css)3623 static int mem_cgroup_css_online(struct cgroup_subsys_state *css)
3624 {
3625 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3626 
3627 	if (memcg_online_kmem(memcg))
3628 		goto remove_id;
3629 
3630 	/*
3631 	 * A memcg must be visible for expand_shrinker_info()
3632 	 * by the time the maps are allocated. So, we allocate maps
3633 	 * here, when for_each_mem_cgroup() can't skip it.
3634 	 */
3635 	if (alloc_shrinker_info(memcg))
3636 		goto offline_kmem;
3637 
3638 	if (unlikely(mem_cgroup_is_root(memcg)) && !mem_cgroup_disabled())
3639 		queue_delayed_work(system_unbound_wq, &stats_flush_dwork,
3640 				   FLUSH_TIME);
3641 	lru_gen_online_memcg(memcg);
3642 
3643 	/* Online state pins memcg ID, memcg ID pins CSS */
3644 	refcount_set(&memcg->id.ref, 1);
3645 	css_get(css);
3646 
3647 	/*
3648 	 * Ensure mem_cgroup_from_id() works once we're fully online.
3649 	 *
3650 	 * We could do this earlier and require callers to filter with
3651 	 * css_tryget_online(). But right now there are no users that
3652 	 * need earlier access, and the workingset code relies on the
3653 	 * cgroup tree linkage (mem_cgroup_get_nr_swap_pages()). So
3654 	 * publish it here at the end of onlining. This matches the
3655 	 * regular ID destruction during offlining.
3656 	 */
3657 	xa_store(&mem_cgroup_ids, memcg->id.id, memcg, GFP_KERNEL);
3658 
3659 	return 0;
3660 offline_kmem:
3661 	memcg_offline_kmem(memcg);
3662 remove_id:
3663 	mem_cgroup_id_remove(memcg);
3664 	return -ENOMEM;
3665 }
3666 
mem_cgroup_css_offline(struct cgroup_subsys_state * css)3667 static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
3668 {
3669 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3670 
3671 	memcg1_css_offline(memcg);
3672 
3673 	page_counter_set_min(&memcg->memory, 0);
3674 	page_counter_set_low(&memcg->memory, 0);
3675 
3676 	zswap_memcg_offline_cleanup(memcg);
3677 
3678 	memcg_offline_kmem(memcg);
3679 	reparent_shrinker_deferred(memcg);
3680 	wb_memcg_offline(memcg);
3681 	lru_gen_offline_memcg(memcg);
3682 
3683 	drain_all_stock(memcg);
3684 
3685 	mem_cgroup_id_put(memcg);
3686 }
3687 
mem_cgroup_css_released(struct cgroup_subsys_state * css)3688 static void mem_cgroup_css_released(struct cgroup_subsys_state *css)
3689 {
3690 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3691 
3692 	invalidate_reclaim_iterators(memcg);
3693 	lru_gen_release_memcg(memcg);
3694 }
3695 
mem_cgroup_css_free(struct cgroup_subsys_state * css)3696 static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
3697 {
3698 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3699 	int __maybe_unused i;
3700 
3701 #ifdef CONFIG_CGROUP_WRITEBACK
3702 	for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++)
3703 		wb_wait_for_completion(&memcg->cgwb_frn[i].done);
3704 #endif
3705 	if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
3706 		static_branch_dec(&memcg_sockets_enabled_key);
3707 
3708 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg1_tcpmem_active(memcg))
3709 		static_branch_dec(&memcg_sockets_enabled_key);
3710 
3711 	if (!cgroup_memory_nobpf)
3712 		static_branch_dec(&memcg_bpf_enabled_key);
3713 
3714 	vmpressure_cleanup(&memcg->vmpressure);
3715 	cancel_work_sync(&memcg->high_work);
3716 	memcg1_remove_from_trees(memcg);
3717 	free_shrinker_info(memcg);
3718 	mem_cgroup_free(memcg);
3719 }
3720 
3721 /**
3722  * mem_cgroup_css_reset - reset the states of a mem_cgroup
3723  * @css: the target css
3724  *
3725  * Reset the states of the mem_cgroup associated with @css.  This is
3726  * invoked when the userland requests disabling on the default hierarchy
3727  * but the memcg is pinned through dependency.  The memcg should stop
3728  * applying policies and should revert to the vanilla state as it may be
3729  * made visible again.
3730  *
3731  * The current implementation only resets the essential configurations.
3732  * This needs to be expanded to cover all the visible parts.
3733  */
mem_cgroup_css_reset(struct cgroup_subsys_state * css)3734 static void mem_cgroup_css_reset(struct cgroup_subsys_state *css)
3735 {
3736 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3737 
3738 	page_counter_set_max(&memcg->memory, PAGE_COUNTER_MAX);
3739 	page_counter_set_max(&memcg->swap, PAGE_COUNTER_MAX);
3740 #ifdef CONFIG_MEMCG_V1
3741 	page_counter_set_max(&memcg->kmem, PAGE_COUNTER_MAX);
3742 	page_counter_set_max(&memcg->tcpmem, PAGE_COUNTER_MAX);
3743 #endif
3744 	page_counter_set_min(&memcg->memory, 0);
3745 	page_counter_set_low(&memcg->memory, 0);
3746 	page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX);
3747 	memcg1_soft_limit_reset(memcg);
3748 	page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX);
3749 	memcg_wb_domain_size_changed(memcg);
3750 }
3751 
3752 struct aggregate_control {
3753 	/* pointer to the aggregated (CPU and subtree aggregated) counters */
3754 	long *aggregate;
3755 	/* pointer to the non-hierarchichal (CPU aggregated) counters */
3756 	long *local;
3757 	/* pointer to the pending child counters during tree propagation */
3758 	long *pending;
3759 	/* pointer to the parent's pending counters, could be NULL */
3760 	long *ppending;
3761 	/* pointer to the percpu counters to be aggregated */
3762 	long *cstat;
3763 	/* pointer to the percpu counters of the last aggregation*/
3764 	long *cstat_prev;
3765 	/* size of the above counters */
3766 	int size;
3767 };
3768 
mem_cgroup_stat_aggregate(struct aggregate_control * ac)3769 static void mem_cgroup_stat_aggregate(struct aggregate_control *ac)
3770 {
3771 	int i;
3772 	long delta, delta_cpu, v;
3773 
3774 	for (i = 0; i < ac->size; i++) {
3775 		/*
3776 		 * Collect the aggregated propagation counts of groups
3777 		 * below us. We're in a per-cpu loop here and this is
3778 		 * a global counter, so the first cycle will get them.
3779 		 */
3780 		delta = ac->pending[i];
3781 		if (delta)
3782 			ac->pending[i] = 0;
3783 
3784 		/* Add CPU changes on this level since the last flush */
3785 		delta_cpu = 0;
3786 		v = READ_ONCE(ac->cstat[i]);
3787 		if (v != ac->cstat_prev[i]) {
3788 			delta_cpu = v - ac->cstat_prev[i];
3789 			delta += delta_cpu;
3790 			ac->cstat_prev[i] = v;
3791 		}
3792 
3793 		/* Aggregate counts on this level and propagate upwards */
3794 		if (delta_cpu)
3795 			ac->local[i] += delta_cpu;
3796 
3797 		if (delta) {
3798 			ac->aggregate[i] += delta;
3799 			if (ac->ppending)
3800 				ac->ppending[i] += delta;
3801 		}
3802 	}
3803 }
3804 
mem_cgroup_css_rstat_flush(struct cgroup_subsys_state * css,int cpu)3805 static void mem_cgroup_css_rstat_flush(struct cgroup_subsys_state *css, int cpu)
3806 {
3807 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3808 	struct mem_cgroup *parent = parent_mem_cgroup(memcg);
3809 	struct memcg_vmstats_percpu *statc;
3810 	struct aggregate_control ac;
3811 	int nid;
3812 
3813 	statc = per_cpu_ptr(memcg->vmstats_percpu, cpu);
3814 
3815 	ac = (struct aggregate_control) {
3816 		.aggregate = memcg->vmstats->state,
3817 		.local = memcg->vmstats->state_local,
3818 		.pending = memcg->vmstats->state_pending,
3819 		.ppending = parent ? parent->vmstats->state_pending : NULL,
3820 		.cstat = statc->state,
3821 		.cstat_prev = statc->state_prev,
3822 		.size = MEMCG_VMSTAT_SIZE,
3823 	};
3824 	mem_cgroup_stat_aggregate(&ac);
3825 
3826 	ac = (struct aggregate_control) {
3827 		.aggregate = memcg->vmstats->events,
3828 		.local = memcg->vmstats->events_local,
3829 		.pending = memcg->vmstats->events_pending,
3830 		.ppending = parent ? parent->vmstats->events_pending : NULL,
3831 		.cstat = statc->events,
3832 		.cstat_prev = statc->events_prev,
3833 		.size = NR_MEMCG_EVENTS,
3834 	};
3835 	mem_cgroup_stat_aggregate(&ac);
3836 
3837 	for_each_node_state(nid, N_MEMORY) {
3838 		struct mem_cgroup_per_node *pn = memcg->nodeinfo[nid];
3839 		struct lruvec_stats *lstats = pn->lruvec_stats;
3840 		struct lruvec_stats *plstats = NULL;
3841 		struct lruvec_stats_percpu *lstatc;
3842 
3843 		if (parent)
3844 			plstats = parent->nodeinfo[nid]->lruvec_stats;
3845 
3846 		lstatc = per_cpu_ptr(pn->lruvec_stats_percpu, cpu);
3847 
3848 		ac = (struct aggregate_control) {
3849 			.aggregate = lstats->state,
3850 			.local = lstats->state_local,
3851 			.pending = lstats->state_pending,
3852 			.ppending = plstats ? plstats->state_pending : NULL,
3853 			.cstat = lstatc->state,
3854 			.cstat_prev = lstatc->state_prev,
3855 			.size = NR_MEMCG_NODE_STAT_ITEMS,
3856 		};
3857 		mem_cgroup_stat_aggregate(&ac);
3858 
3859 	}
3860 	WRITE_ONCE(statc->stats_updates, 0);
3861 	/* We are in a per-cpu loop here, only do the atomic write once */
3862 	if (atomic64_read(&memcg->vmstats->stats_updates))
3863 		atomic64_set(&memcg->vmstats->stats_updates, 0);
3864 }
3865 
mem_cgroup_fork(struct task_struct * task)3866 static void mem_cgroup_fork(struct task_struct *task)
3867 {
3868 	/*
3869 	 * Set the update flag to cause task->objcg to be initialized lazily
3870 	 * on the first allocation. It can be done without any synchronization
3871 	 * because it's always performed on the current task, so does
3872 	 * current_objcg_update().
3873 	 */
3874 	task->objcg = (struct obj_cgroup *)CURRENT_OBJCG_UPDATE_FLAG;
3875 }
3876 
mem_cgroup_exit(struct task_struct * task)3877 static void mem_cgroup_exit(struct task_struct *task)
3878 {
3879 	struct obj_cgroup *objcg = task->objcg;
3880 
3881 	objcg = (struct obj_cgroup *)
3882 		((unsigned long)objcg & ~CURRENT_OBJCG_UPDATE_FLAG);
3883 	obj_cgroup_put(objcg);
3884 
3885 	/*
3886 	 * Some kernel allocations can happen after this point,
3887 	 * but let's ignore them. It can be done without any synchronization
3888 	 * because it's always performed on the current task, so does
3889 	 * current_objcg_update().
3890 	 */
3891 	task->objcg = NULL;
3892 }
3893 
3894 #ifdef CONFIG_LRU_GEN
mem_cgroup_lru_gen_attach(struct cgroup_taskset * tset)3895 static void mem_cgroup_lru_gen_attach(struct cgroup_taskset *tset)
3896 {
3897 	struct task_struct *task;
3898 	struct cgroup_subsys_state *css;
3899 
3900 	/* find the first leader if there is any */
3901 	cgroup_taskset_for_each_leader(task, css, tset)
3902 		break;
3903 
3904 	if (!task)
3905 		return;
3906 
3907 	task_lock(task);
3908 	if (task->mm && READ_ONCE(task->mm->owner) == task)
3909 		lru_gen_migrate_mm(task->mm);
3910 	task_unlock(task);
3911 }
3912 #else
mem_cgroup_lru_gen_attach(struct cgroup_taskset * tset)3913 static void mem_cgroup_lru_gen_attach(struct cgroup_taskset *tset) {}
3914 #endif /* CONFIG_LRU_GEN */
3915 
mem_cgroup_kmem_attach(struct cgroup_taskset * tset)3916 static void mem_cgroup_kmem_attach(struct cgroup_taskset *tset)
3917 {
3918 	struct task_struct *task;
3919 	struct cgroup_subsys_state *css;
3920 
3921 	cgroup_taskset_for_each(task, css, tset) {
3922 		/* atomically set the update bit */
3923 		set_bit(CURRENT_OBJCG_UPDATE_BIT, (unsigned long *)&task->objcg);
3924 	}
3925 }
3926 
mem_cgroup_attach(struct cgroup_taskset * tset)3927 static void mem_cgroup_attach(struct cgroup_taskset *tset)
3928 {
3929 	mem_cgroup_lru_gen_attach(tset);
3930 	mem_cgroup_kmem_attach(tset);
3931 }
3932 
seq_puts_memcg_tunable(struct seq_file * m,unsigned long value)3933 static int seq_puts_memcg_tunable(struct seq_file *m, unsigned long value)
3934 {
3935 	if (value == PAGE_COUNTER_MAX)
3936 		seq_puts(m, "max\n");
3937 	else
3938 		seq_printf(m, "%llu\n", (u64)value * PAGE_SIZE);
3939 
3940 	return 0;
3941 }
3942 
memory_current_read(struct cgroup_subsys_state * css,struct cftype * cft)3943 static u64 memory_current_read(struct cgroup_subsys_state *css,
3944 			       struct cftype *cft)
3945 {
3946 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3947 
3948 	return (u64)page_counter_read(&memcg->memory) * PAGE_SIZE;
3949 }
3950 
3951 #define OFP_PEAK_UNSET (((-1UL)))
3952 
peak_show(struct seq_file * sf,void * v,struct page_counter * pc)3953 static int peak_show(struct seq_file *sf, void *v, struct page_counter *pc)
3954 {
3955 	struct cgroup_of_peak *ofp = of_peak(sf->private);
3956 	u64 fd_peak = READ_ONCE(ofp->value), peak;
3957 
3958 	/* User wants global or local peak? */
3959 	if (fd_peak == OFP_PEAK_UNSET)
3960 		peak = pc->watermark;
3961 	else
3962 		peak = max(fd_peak, READ_ONCE(pc->local_watermark));
3963 
3964 	seq_printf(sf, "%llu\n", peak * PAGE_SIZE);
3965 	return 0;
3966 }
3967 
memory_peak_show(struct seq_file * sf,void * v)3968 static int memory_peak_show(struct seq_file *sf, void *v)
3969 {
3970 	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(sf));
3971 
3972 	return peak_show(sf, v, &memcg->memory);
3973 }
3974 
peak_open(struct kernfs_open_file * of)3975 static int peak_open(struct kernfs_open_file *of)
3976 {
3977 	struct cgroup_of_peak *ofp = of_peak(of);
3978 
3979 	ofp->value = OFP_PEAK_UNSET;
3980 	return 0;
3981 }
3982 
peak_release(struct kernfs_open_file * of)3983 static void peak_release(struct kernfs_open_file *of)
3984 {
3985 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
3986 	struct cgroup_of_peak *ofp = of_peak(of);
3987 
3988 	if (ofp->value == OFP_PEAK_UNSET) {
3989 		/* fast path (no writes on this fd) */
3990 		return;
3991 	}
3992 	spin_lock(&memcg->peaks_lock);
3993 	list_del(&ofp->list);
3994 	spin_unlock(&memcg->peaks_lock);
3995 }
3996 
peak_write(struct kernfs_open_file * of,char * buf,size_t nbytes,loff_t off,struct page_counter * pc,struct list_head * watchers)3997 static ssize_t peak_write(struct kernfs_open_file *of, char *buf, size_t nbytes,
3998 			  loff_t off, struct page_counter *pc,
3999 			  struct list_head *watchers)
4000 {
4001 	unsigned long usage;
4002 	struct cgroup_of_peak *peer_ctx;
4003 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4004 	struct cgroup_of_peak *ofp = of_peak(of);
4005 
4006 	spin_lock(&memcg->peaks_lock);
4007 
4008 	usage = page_counter_read(pc);
4009 	WRITE_ONCE(pc->local_watermark, usage);
4010 
4011 	list_for_each_entry(peer_ctx, watchers, list)
4012 		if (usage > peer_ctx->value)
4013 			WRITE_ONCE(peer_ctx->value, usage);
4014 
4015 	/* initial write, register watcher */
4016 	if (ofp->value == -1)
4017 		list_add(&ofp->list, watchers);
4018 
4019 	WRITE_ONCE(ofp->value, usage);
4020 	spin_unlock(&memcg->peaks_lock);
4021 
4022 	return nbytes;
4023 }
4024 
memory_peak_write(struct kernfs_open_file * of,char * buf,size_t nbytes,loff_t off)4025 static ssize_t memory_peak_write(struct kernfs_open_file *of, char *buf,
4026 				 size_t nbytes, loff_t off)
4027 {
4028 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4029 
4030 	return peak_write(of, buf, nbytes, off, &memcg->memory,
4031 			  &memcg->memory_peaks);
4032 }
4033 
4034 #undef OFP_PEAK_UNSET
4035 
memory_min_show(struct seq_file * m,void * v)4036 static int memory_min_show(struct seq_file *m, void *v)
4037 {
4038 	return seq_puts_memcg_tunable(m,
4039 		READ_ONCE(mem_cgroup_from_seq(m)->memory.min));
4040 }
4041 
memory_min_write(struct kernfs_open_file * of,char * buf,size_t nbytes,loff_t off)4042 static ssize_t memory_min_write(struct kernfs_open_file *of,
4043 				char *buf, size_t nbytes, loff_t off)
4044 {
4045 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4046 	unsigned long min;
4047 	int err;
4048 
4049 	buf = strstrip(buf);
4050 	err = page_counter_memparse(buf, "max", &min);
4051 	if (err)
4052 		return err;
4053 
4054 	page_counter_set_min(&memcg->memory, min);
4055 
4056 	return nbytes;
4057 }
4058 
memory_low_show(struct seq_file * m,void * v)4059 static int memory_low_show(struct seq_file *m, void *v)
4060 {
4061 	return seq_puts_memcg_tunable(m,
4062 		READ_ONCE(mem_cgroup_from_seq(m)->memory.low));
4063 }
4064 
memory_low_write(struct kernfs_open_file * of,char * buf,size_t nbytes,loff_t off)4065 static ssize_t memory_low_write(struct kernfs_open_file *of,
4066 				char *buf, size_t nbytes, loff_t off)
4067 {
4068 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4069 	unsigned long low;
4070 	int err;
4071 
4072 	buf = strstrip(buf);
4073 	err = page_counter_memparse(buf, "max", &low);
4074 	if (err)
4075 		return err;
4076 
4077 	page_counter_set_low(&memcg->memory, low);
4078 
4079 	return nbytes;
4080 }
4081 
memory_high_show(struct seq_file * m,void * v)4082 static int memory_high_show(struct seq_file *m, void *v)
4083 {
4084 	return seq_puts_memcg_tunable(m,
4085 		READ_ONCE(mem_cgroup_from_seq(m)->memory.high));
4086 }
4087 
memory_high_write(struct kernfs_open_file * of,char * buf,size_t nbytes,loff_t off)4088 static ssize_t memory_high_write(struct kernfs_open_file *of,
4089 				 char *buf, size_t nbytes, loff_t off)
4090 {
4091 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4092 	unsigned int nr_retries = MAX_RECLAIM_RETRIES;
4093 	bool drained = false;
4094 	unsigned long high;
4095 	int err;
4096 
4097 	buf = strstrip(buf);
4098 	err = page_counter_memparse(buf, "max", &high);
4099 	if (err)
4100 		return err;
4101 
4102 	page_counter_set_high(&memcg->memory, high);
4103 
4104 	for (;;) {
4105 		unsigned long nr_pages = page_counter_read(&memcg->memory);
4106 		unsigned long reclaimed;
4107 
4108 		if (nr_pages <= high)
4109 			break;
4110 
4111 		if (signal_pending(current))
4112 			break;
4113 
4114 		if (!drained) {
4115 			drain_all_stock(memcg);
4116 			drained = true;
4117 			continue;
4118 		}
4119 
4120 		reclaimed = try_to_free_mem_cgroup_pages(memcg, nr_pages - high,
4121 					GFP_KERNEL, MEMCG_RECLAIM_MAY_SWAP, NULL);
4122 
4123 		if (!reclaimed && !nr_retries--)
4124 			break;
4125 	}
4126 
4127 	memcg_wb_domain_size_changed(memcg);
4128 	return nbytes;
4129 }
4130 
memory_max_show(struct seq_file * m,void * v)4131 static int memory_max_show(struct seq_file *m, void *v)
4132 {
4133 	return seq_puts_memcg_tunable(m,
4134 		READ_ONCE(mem_cgroup_from_seq(m)->memory.max));
4135 }
4136 
memory_max_write(struct kernfs_open_file * of,char * buf,size_t nbytes,loff_t off)4137 static ssize_t memory_max_write(struct kernfs_open_file *of,
4138 				char *buf, size_t nbytes, loff_t off)
4139 {
4140 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4141 	unsigned int nr_reclaims = MAX_RECLAIM_RETRIES;
4142 	bool drained = false;
4143 	unsigned long max;
4144 	int err;
4145 
4146 	buf = strstrip(buf);
4147 	err = page_counter_memparse(buf, "max", &max);
4148 	if (err)
4149 		return err;
4150 
4151 	xchg(&memcg->memory.max, max);
4152 
4153 	for (;;) {
4154 		unsigned long nr_pages = page_counter_read(&memcg->memory);
4155 
4156 		if (nr_pages <= max)
4157 			break;
4158 
4159 		if (signal_pending(current))
4160 			break;
4161 
4162 		if (!drained) {
4163 			drain_all_stock(memcg);
4164 			drained = true;
4165 			continue;
4166 		}
4167 
4168 		if (nr_reclaims) {
4169 			if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max,
4170 					GFP_KERNEL, MEMCG_RECLAIM_MAY_SWAP, NULL))
4171 				nr_reclaims--;
4172 			continue;
4173 		}
4174 
4175 		memcg_memory_event(memcg, MEMCG_OOM);
4176 		if (!mem_cgroup_out_of_memory(memcg, GFP_KERNEL, 0))
4177 			break;
4178 		cond_resched();
4179 	}
4180 
4181 	memcg_wb_domain_size_changed(memcg);
4182 	return nbytes;
4183 }
4184 
4185 /*
4186  * Note: don't forget to update the 'samples/cgroup/memcg_event_listener'
4187  * if any new events become available.
4188  */
__memory_events_show(struct seq_file * m,atomic_long_t * events)4189 static void __memory_events_show(struct seq_file *m, atomic_long_t *events)
4190 {
4191 	seq_printf(m, "low %lu\n", atomic_long_read(&events[MEMCG_LOW]));
4192 	seq_printf(m, "high %lu\n", atomic_long_read(&events[MEMCG_HIGH]));
4193 	seq_printf(m, "max %lu\n", atomic_long_read(&events[MEMCG_MAX]));
4194 	seq_printf(m, "oom %lu\n", atomic_long_read(&events[MEMCG_OOM]));
4195 	seq_printf(m, "oom_kill %lu\n",
4196 		   atomic_long_read(&events[MEMCG_OOM_KILL]));
4197 	seq_printf(m, "oom_group_kill %lu\n",
4198 		   atomic_long_read(&events[MEMCG_OOM_GROUP_KILL]));
4199 }
4200 
memory_events_show(struct seq_file * m,void * v)4201 static int memory_events_show(struct seq_file *m, void *v)
4202 {
4203 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
4204 
4205 	__memory_events_show(m, memcg->memory_events);
4206 	return 0;
4207 }
4208 
memory_events_local_show(struct seq_file * m,void * v)4209 static int memory_events_local_show(struct seq_file *m, void *v)
4210 {
4211 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
4212 
4213 	__memory_events_show(m, memcg->memory_events_local);
4214 	return 0;
4215 }
4216 
memory_stat_show(struct seq_file * m,void * v)4217 int memory_stat_show(struct seq_file *m, void *v)
4218 {
4219 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
4220 	char *buf = kmalloc(SEQ_BUF_SIZE, GFP_KERNEL);
4221 	struct seq_buf s;
4222 
4223 	if (!buf)
4224 		return -ENOMEM;
4225 	seq_buf_init(&s, buf, SEQ_BUF_SIZE);
4226 	memory_stat_format(memcg, &s);
4227 	seq_puts(m, buf);
4228 	kfree(buf);
4229 	return 0;
4230 }
4231 
4232 #ifdef CONFIG_NUMA
lruvec_page_state_output(struct lruvec * lruvec,int item)4233 static inline unsigned long lruvec_page_state_output(struct lruvec *lruvec,
4234 						     int item)
4235 {
4236 	return lruvec_page_state(lruvec, item) *
4237 		memcg_page_state_output_unit(item);
4238 }
4239 
memory_numa_stat_show(struct seq_file * m,void * v)4240 static int memory_numa_stat_show(struct seq_file *m, void *v)
4241 {
4242 	int i;
4243 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
4244 
4245 	mem_cgroup_flush_stats(memcg);
4246 
4247 	for (i = 0; i < ARRAY_SIZE(memory_stats); i++) {
4248 		int nid;
4249 
4250 		if (memory_stats[i].idx >= NR_VM_NODE_STAT_ITEMS)
4251 			continue;
4252 
4253 		seq_printf(m, "%s", memory_stats[i].name);
4254 		for_each_node_state(nid, N_MEMORY) {
4255 			u64 size;
4256 			struct lruvec *lruvec;
4257 
4258 			lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
4259 			size = lruvec_page_state_output(lruvec,
4260 							memory_stats[i].idx);
4261 			seq_printf(m, " N%d=%llu", nid, size);
4262 		}
4263 		seq_putc(m, '\n');
4264 	}
4265 
4266 	return 0;
4267 }
4268 #endif
4269 
memory_oom_group_show(struct seq_file * m,void * v)4270 static int memory_oom_group_show(struct seq_file *m, void *v)
4271 {
4272 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
4273 
4274 	seq_printf(m, "%d\n", READ_ONCE(memcg->oom_group));
4275 
4276 	return 0;
4277 }
4278 
memory_oom_group_write(struct kernfs_open_file * of,char * buf,size_t nbytes,loff_t off)4279 static ssize_t memory_oom_group_write(struct kernfs_open_file *of,
4280 				      char *buf, size_t nbytes, loff_t off)
4281 {
4282 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4283 	int ret, oom_group;
4284 
4285 	buf = strstrip(buf);
4286 	if (!buf)
4287 		return -EINVAL;
4288 
4289 	ret = kstrtoint(buf, 0, &oom_group);
4290 	if (ret)
4291 		return ret;
4292 
4293 	if (oom_group != 0 && oom_group != 1)
4294 		return -EINVAL;
4295 
4296 	WRITE_ONCE(memcg->oom_group, oom_group);
4297 
4298 	return nbytes;
4299 }
4300 
4301 enum {
4302 	MEMORY_RECLAIM_SWAPPINESS = 0,
4303 	MEMORY_RECLAIM_NULL,
4304 };
4305 
4306 static const match_table_t tokens = {
4307 	{ MEMORY_RECLAIM_SWAPPINESS, "swappiness=%d"},
4308 	{ MEMORY_RECLAIM_NULL, NULL },
4309 };
4310 
memory_reclaim(struct kernfs_open_file * of,char * buf,size_t nbytes,loff_t off)4311 static ssize_t memory_reclaim(struct kernfs_open_file *of, char *buf,
4312 			      size_t nbytes, loff_t off)
4313 {
4314 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4315 	unsigned int nr_retries = MAX_RECLAIM_RETRIES;
4316 	unsigned long nr_to_reclaim, nr_reclaimed = 0;
4317 	int swappiness = -1;
4318 	unsigned int reclaim_options;
4319 	char *old_buf, *start;
4320 	substring_t args[MAX_OPT_ARGS];
4321 
4322 	buf = strstrip(buf);
4323 
4324 	old_buf = buf;
4325 	nr_to_reclaim = memparse(buf, &buf) / PAGE_SIZE;
4326 	if (buf == old_buf)
4327 		return -EINVAL;
4328 
4329 	buf = strstrip(buf);
4330 
4331 	while ((start = strsep(&buf, " ")) != NULL) {
4332 		if (!strlen(start))
4333 			continue;
4334 		switch (match_token(start, tokens, args)) {
4335 		case MEMORY_RECLAIM_SWAPPINESS:
4336 			if (match_int(&args[0], &swappiness))
4337 				return -EINVAL;
4338 			if (swappiness < MIN_SWAPPINESS || swappiness > MAX_SWAPPINESS)
4339 				return -EINVAL;
4340 			break;
4341 		default:
4342 			return -EINVAL;
4343 		}
4344 	}
4345 
4346 	reclaim_options	= MEMCG_RECLAIM_MAY_SWAP | MEMCG_RECLAIM_PROACTIVE;
4347 	while (nr_reclaimed < nr_to_reclaim) {
4348 		/* Will converge on zero, but reclaim enforces a minimum */
4349 		unsigned long batch_size = (nr_to_reclaim - nr_reclaimed) / 4;
4350 		unsigned long reclaimed;
4351 
4352 		if (signal_pending(current))
4353 			return -EINTR;
4354 
4355 		/*
4356 		 * This is the final attempt, drain percpu lru caches in the
4357 		 * hope of introducing more evictable pages for
4358 		 * try_to_free_mem_cgroup_pages().
4359 		 */
4360 		if (!nr_retries)
4361 			lru_add_drain_all();
4362 
4363 		reclaimed = try_to_free_mem_cgroup_pages(memcg,
4364 					batch_size, GFP_KERNEL,
4365 					reclaim_options,
4366 					swappiness == -1 ? NULL : &swappiness);
4367 
4368 		if (!reclaimed && !nr_retries--)
4369 			return -EAGAIN;
4370 
4371 		nr_reclaimed += reclaimed;
4372 	}
4373 
4374 	return nbytes;
4375 }
4376 
4377 static struct cftype memory_files[] = {
4378 	{
4379 		.name = "current",
4380 		.flags = CFTYPE_NOT_ON_ROOT,
4381 		.read_u64 = memory_current_read,
4382 	},
4383 	{
4384 		.name = "peak",
4385 		.flags = CFTYPE_NOT_ON_ROOT,
4386 		.open = peak_open,
4387 		.release = peak_release,
4388 		.seq_show = memory_peak_show,
4389 		.write = memory_peak_write,
4390 	},
4391 	{
4392 		.name = "min",
4393 		.flags = CFTYPE_NOT_ON_ROOT,
4394 		.seq_show = memory_min_show,
4395 		.write = memory_min_write,
4396 	},
4397 	{
4398 		.name = "low",
4399 		.flags = CFTYPE_NOT_ON_ROOT,
4400 		.seq_show = memory_low_show,
4401 		.write = memory_low_write,
4402 	},
4403 	{
4404 		.name = "high",
4405 		.flags = CFTYPE_NOT_ON_ROOT,
4406 		.seq_show = memory_high_show,
4407 		.write = memory_high_write,
4408 	},
4409 	{
4410 		.name = "max",
4411 		.flags = CFTYPE_NOT_ON_ROOT,
4412 		.seq_show = memory_max_show,
4413 		.write = memory_max_write,
4414 	},
4415 	{
4416 		.name = "events",
4417 		.flags = CFTYPE_NOT_ON_ROOT,
4418 		.file_offset = offsetof(struct mem_cgroup, events_file),
4419 		.seq_show = memory_events_show,
4420 	},
4421 	{
4422 		.name = "events.local",
4423 		.flags = CFTYPE_NOT_ON_ROOT,
4424 		.file_offset = offsetof(struct mem_cgroup, events_local_file),
4425 		.seq_show = memory_events_local_show,
4426 	},
4427 	{
4428 		.name = "stat",
4429 		.seq_show = memory_stat_show,
4430 	},
4431 #ifdef CONFIG_NUMA
4432 	{
4433 		.name = "numa_stat",
4434 		.seq_show = memory_numa_stat_show,
4435 	},
4436 #endif
4437 	{
4438 		.name = "oom.group",
4439 		.flags = CFTYPE_NOT_ON_ROOT | CFTYPE_NS_DELEGATABLE,
4440 		.seq_show = memory_oom_group_show,
4441 		.write = memory_oom_group_write,
4442 	},
4443 	{
4444 		.name = "reclaim",
4445 		.flags = CFTYPE_NS_DELEGATABLE,
4446 		.write = memory_reclaim,
4447 	},
4448 	{ }	/* terminate */
4449 };
4450 
4451 struct cgroup_subsys memory_cgrp_subsys = {
4452 	.css_alloc = mem_cgroup_css_alloc,
4453 	.css_online = mem_cgroup_css_online,
4454 	.css_offline = mem_cgroup_css_offline,
4455 	.css_released = mem_cgroup_css_released,
4456 	.css_free = mem_cgroup_css_free,
4457 	.css_reset = mem_cgroup_css_reset,
4458 	.css_rstat_flush = mem_cgroup_css_rstat_flush,
4459 	.attach = mem_cgroup_attach,
4460 	.fork = mem_cgroup_fork,
4461 	.exit = mem_cgroup_exit,
4462 	.dfl_cftypes = memory_files,
4463 #ifdef CONFIG_MEMCG_V1
4464 	.legacy_cftypes = mem_cgroup_legacy_files,
4465 #endif
4466 	.early_init = 0,
4467 };
4468 
4469 /**
4470  * mem_cgroup_calculate_protection - check if memory consumption is in the normal range
4471  * @root: the top ancestor of the sub-tree being checked
4472  * @memcg: the memory cgroup to check
4473  *
4474  * WARNING: This function is not stateless! It can only be used as part
4475  *          of a top-down tree iteration, not for isolated queries.
4476  */
mem_cgroup_calculate_protection(struct mem_cgroup * root,struct mem_cgroup * memcg)4477 void mem_cgroup_calculate_protection(struct mem_cgroup *root,
4478 				     struct mem_cgroup *memcg)
4479 {
4480 	bool recursive_protection =
4481 		cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_RECURSIVE_PROT;
4482 
4483 	if (mem_cgroup_disabled())
4484 		return;
4485 
4486 	if (!root)
4487 		root = root_mem_cgroup;
4488 
4489 	page_counter_calculate_protection(&root->memory, &memcg->memory, recursive_protection);
4490 }
4491 
charge_memcg(struct folio * folio,struct mem_cgroup * memcg,gfp_t gfp)4492 static int charge_memcg(struct folio *folio, struct mem_cgroup *memcg,
4493 			gfp_t gfp)
4494 {
4495 	int ret;
4496 
4497 	ret = try_charge(memcg, gfp, folio_nr_pages(folio));
4498 	if (ret)
4499 		goto out;
4500 
4501 	css_get(&memcg->css);
4502 	commit_charge(folio, memcg);
4503 	memcg1_commit_charge(folio, memcg);
4504 out:
4505 	return ret;
4506 }
4507 
__mem_cgroup_charge(struct folio * folio,struct mm_struct * mm,gfp_t gfp)4508 int __mem_cgroup_charge(struct folio *folio, struct mm_struct *mm, gfp_t gfp)
4509 {
4510 	struct mem_cgroup *memcg;
4511 	int ret;
4512 
4513 	memcg = get_mem_cgroup_from_mm(mm);
4514 	ret = charge_memcg(folio, memcg, gfp);
4515 	css_put(&memcg->css);
4516 
4517 	return ret;
4518 }
4519 
4520 /**
4521  * mem_cgroup_charge_hugetlb - charge the memcg for a hugetlb folio
4522  * @folio: folio being charged
4523  * @gfp: reclaim mode
4524  *
4525  * This function is called when allocating a huge page folio, after the page has
4526  * already been obtained and charged to the appropriate hugetlb cgroup
4527  * controller (if it is enabled).
4528  *
4529  * Returns ENOMEM if the memcg is already full.
4530  * Returns 0 if either the charge was successful, or if we skip the charging.
4531  */
mem_cgroup_charge_hugetlb(struct folio * folio,gfp_t gfp)4532 int mem_cgroup_charge_hugetlb(struct folio *folio, gfp_t gfp)
4533 {
4534 	struct mem_cgroup *memcg = get_mem_cgroup_from_current();
4535 	int ret = 0;
4536 
4537 	/*
4538 	 * Even memcg does not account for hugetlb, we still want to update
4539 	 * system-level stats via lruvec_stat_mod_folio. Return 0, and skip
4540 	 * charging the memcg.
4541 	 */
4542 	if (mem_cgroup_disabled() || !memcg_accounts_hugetlb() ||
4543 		!memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
4544 		goto out;
4545 
4546 	if (charge_memcg(folio, memcg, gfp))
4547 		ret = -ENOMEM;
4548 
4549 out:
4550 	mem_cgroup_put(memcg);
4551 	return ret;
4552 }
4553 
4554 /**
4555  * mem_cgroup_swapin_charge_folio - Charge a newly allocated folio for swapin.
4556  * @folio: folio to charge.
4557  * @mm: mm context of the victim
4558  * @gfp: reclaim mode
4559  * @entry: swap entry for which the folio is allocated
4560  *
4561  * This function charges a folio allocated for swapin. Please call this before
4562  * adding the folio to the swapcache.
4563  *
4564  * Returns 0 on success. Otherwise, an error code is returned.
4565  */
mem_cgroup_swapin_charge_folio(struct folio * folio,struct mm_struct * mm,gfp_t gfp,swp_entry_t entry)4566 int mem_cgroup_swapin_charge_folio(struct folio *folio, struct mm_struct *mm,
4567 				  gfp_t gfp, swp_entry_t entry)
4568 {
4569 	struct mem_cgroup *memcg;
4570 	unsigned short id;
4571 	int ret;
4572 
4573 	if (mem_cgroup_disabled())
4574 		return 0;
4575 
4576 	id = lookup_swap_cgroup_id(entry);
4577 	rcu_read_lock();
4578 	memcg = mem_cgroup_from_id(id);
4579 	if (!memcg || !css_tryget_online(&memcg->css))
4580 		memcg = get_mem_cgroup_from_mm(mm);
4581 	rcu_read_unlock();
4582 
4583 	ret = charge_memcg(folio, memcg, gfp);
4584 
4585 	css_put(&memcg->css);
4586 	return ret;
4587 }
4588 
4589 /*
4590  * mem_cgroup_swapin_uncharge_swap - uncharge swap slot
4591  * @entry: the first swap entry for which the pages are charged
4592  * @nr_pages: number of pages which will be uncharged
4593  *
4594  * Call this function after successfully adding the charged page to swapcache.
4595  *
4596  * Note: This function assumes the page for which swap slot is being uncharged
4597  * is order 0 page.
4598  */
mem_cgroup_swapin_uncharge_swap(swp_entry_t entry,unsigned int nr_pages)4599 void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry, unsigned int nr_pages)
4600 {
4601 	/*
4602 	 * Cgroup1's unified memory+swap counter has been charged with the
4603 	 * new swapcache page, finish the transfer by uncharging the swap
4604 	 * slot. The swap slot would also get uncharged when it dies, but
4605 	 * it can stick around indefinitely and we'd count the page twice
4606 	 * the entire time.
4607 	 *
4608 	 * Cgroup2 has separate resource counters for memory and swap,
4609 	 * so this is a non-issue here. Memory and swap charge lifetimes
4610 	 * correspond 1:1 to page and swap slot lifetimes: we charge the
4611 	 * page to memory here, and uncharge swap when the slot is freed.
4612 	 */
4613 	if (do_memsw_account()) {
4614 		/*
4615 		 * The swap entry might not get freed for a long time,
4616 		 * let's not wait for it.  The page already received a
4617 		 * memory+swap charge, drop the swap entry duplicate.
4618 		 */
4619 		mem_cgroup_uncharge_swap(entry, nr_pages);
4620 	}
4621 }
4622 
4623 struct uncharge_gather {
4624 	struct mem_cgroup *memcg;
4625 	unsigned long nr_memory;
4626 	unsigned long pgpgout;
4627 	unsigned long nr_kmem;
4628 	int nid;
4629 };
4630 
uncharge_gather_clear(struct uncharge_gather * ug)4631 static inline void uncharge_gather_clear(struct uncharge_gather *ug)
4632 {
4633 	memset(ug, 0, sizeof(*ug));
4634 }
4635 
uncharge_batch(const struct uncharge_gather * ug)4636 static void uncharge_batch(const struct uncharge_gather *ug)
4637 {
4638 	if (ug->nr_memory) {
4639 		page_counter_uncharge(&ug->memcg->memory, ug->nr_memory);
4640 		if (do_memsw_account())
4641 			page_counter_uncharge(&ug->memcg->memsw, ug->nr_memory);
4642 		if (ug->nr_kmem) {
4643 			mod_memcg_state(ug->memcg, MEMCG_KMEM, -ug->nr_kmem);
4644 			memcg1_account_kmem(ug->memcg, -ug->nr_kmem);
4645 		}
4646 		memcg1_oom_recover(ug->memcg);
4647 	}
4648 
4649 	memcg1_uncharge_batch(ug->memcg, ug->pgpgout, ug->nr_memory, ug->nid);
4650 
4651 	/* drop reference from uncharge_folio */
4652 	css_put(&ug->memcg->css);
4653 }
4654 
uncharge_folio(struct folio * folio,struct uncharge_gather * ug)4655 static void uncharge_folio(struct folio *folio, struct uncharge_gather *ug)
4656 {
4657 	long nr_pages;
4658 	struct mem_cgroup *memcg;
4659 	struct obj_cgroup *objcg;
4660 
4661 	VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
4662 
4663 	/*
4664 	 * Nobody should be changing or seriously looking at
4665 	 * folio memcg or objcg at this point, we have fully
4666 	 * exclusive access to the folio.
4667 	 */
4668 	if (folio_memcg_kmem(folio)) {
4669 		objcg = __folio_objcg(folio);
4670 		/*
4671 		 * This get matches the put at the end of the function and
4672 		 * kmem pages do not hold memcg references anymore.
4673 		 */
4674 		memcg = get_mem_cgroup_from_objcg(objcg);
4675 	} else {
4676 		memcg = __folio_memcg(folio);
4677 	}
4678 
4679 	if (!memcg)
4680 		return;
4681 
4682 	if (ug->memcg != memcg) {
4683 		if (ug->memcg) {
4684 			uncharge_batch(ug);
4685 			uncharge_gather_clear(ug);
4686 		}
4687 		ug->memcg = memcg;
4688 		ug->nid = folio_nid(folio);
4689 
4690 		/* pairs with css_put in uncharge_batch */
4691 		css_get(&memcg->css);
4692 	}
4693 
4694 	nr_pages = folio_nr_pages(folio);
4695 
4696 	if (folio_memcg_kmem(folio)) {
4697 		ug->nr_memory += nr_pages;
4698 		ug->nr_kmem += nr_pages;
4699 
4700 		folio->memcg_data = 0;
4701 		obj_cgroup_put(objcg);
4702 	} else {
4703 		/* LRU pages aren't accounted at the root level */
4704 		if (!mem_cgroup_is_root(memcg))
4705 			ug->nr_memory += nr_pages;
4706 		ug->pgpgout++;
4707 
4708 		WARN_ON_ONCE(folio_unqueue_deferred_split(folio));
4709 		folio->memcg_data = 0;
4710 	}
4711 
4712 	css_put(&memcg->css);
4713 }
4714 
__mem_cgroup_uncharge(struct folio * folio)4715 void __mem_cgroup_uncharge(struct folio *folio)
4716 {
4717 	struct uncharge_gather ug;
4718 
4719 	/* Don't touch folio->lru of any random page, pre-check: */
4720 	if (!folio_memcg_charged(folio))
4721 		return;
4722 
4723 	uncharge_gather_clear(&ug);
4724 	uncharge_folio(folio, &ug);
4725 	uncharge_batch(&ug);
4726 }
4727 
__mem_cgroup_uncharge_folios(struct folio_batch * folios)4728 void __mem_cgroup_uncharge_folios(struct folio_batch *folios)
4729 {
4730 	struct uncharge_gather ug;
4731 	unsigned int i;
4732 
4733 	uncharge_gather_clear(&ug);
4734 	for (i = 0; i < folios->nr; i++)
4735 		uncharge_folio(folios->folios[i], &ug);
4736 	if (ug.memcg)
4737 		uncharge_batch(&ug);
4738 }
4739 
4740 /**
4741  * mem_cgroup_replace_folio - Charge a folio's replacement.
4742  * @old: Currently circulating folio.
4743  * @new: Replacement folio.
4744  *
4745  * Charge @new as a replacement folio for @old. @old will
4746  * be uncharged upon free.
4747  *
4748  * Both folios must be locked, @new->mapping must be set up.
4749  */
mem_cgroup_replace_folio(struct folio * old,struct folio * new)4750 void mem_cgroup_replace_folio(struct folio *old, struct folio *new)
4751 {
4752 	struct mem_cgroup *memcg;
4753 	long nr_pages = folio_nr_pages(new);
4754 
4755 	VM_BUG_ON_FOLIO(!folio_test_locked(old), old);
4756 	VM_BUG_ON_FOLIO(!folio_test_locked(new), new);
4757 	VM_BUG_ON_FOLIO(folio_test_anon(old) != folio_test_anon(new), new);
4758 	VM_BUG_ON_FOLIO(folio_nr_pages(old) != nr_pages, new);
4759 
4760 	if (mem_cgroup_disabled())
4761 		return;
4762 
4763 	/* Page cache replacement: new folio already charged? */
4764 	if (folio_memcg_charged(new))
4765 		return;
4766 
4767 	memcg = folio_memcg(old);
4768 	VM_WARN_ON_ONCE_FOLIO(!memcg, old);
4769 	if (!memcg)
4770 		return;
4771 
4772 	/* Force-charge the new page. The old one will be freed soon */
4773 	if (!mem_cgroup_is_root(memcg)) {
4774 		page_counter_charge(&memcg->memory, nr_pages);
4775 		if (do_memsw_account())
4776 			page_counter_charge(&memcg->memsw, nr_pages);
4777 	}
4778 
4779 	css_get(&memcg->css);
4780 	commit_charge(new, memcg);
4781 	memcg1_commit_charge(new, memcg);
4782 }
4783 
4784 /**
4785  * mem_cgroup_migrate - Transfer the memcg data from the old to the new folio.
4786  * @old: Currently circulating folio.
4787  * @new: Replacement folio.
4788  *
4789  * Transfer the memcg data from the old folio to the new folio for migration.
4790  * The old folio's data info will be cleared. Note that the memory counters
4791  * will remain unchanged throughout the process.
4792  *
4793  * Both folios must be locked, @new->mapping must be set up.
4794  */
mem_cgroup_migrate(struct folio * old,struct folio * new)4795 void mem_cgroup_migrate(struct folio *old, struct folio *new)
4796 {
4797 	struct mem_cgroup *memcg;
4798 
4799 	VM_BUG_ON_FOLIO(!folio_test_locked(old), old);
4800 	VM_BUG_ON_FOLIO(!folio_test_locked(new), new);
4801 	VM_BUG_ON_FOLIO(folio_test_anon(old) != folio_test_anon(new), new);
4802 	VM_BUG_ON_FOLIO(folio_nr_pages(old) != folio_nr_pages(new), new);
4803 	VM_BUG_ON_FOLIO(folio_test_lru(old), old);
4804 
4805 	if (mem_cgroup_disabled())
4806 		return;
4807 
4808 	memcg = folio_memcg(old);
4809 	/*
4810 	 * Note that it is normal to see !memcg for a hugetlb folio.
4811 	 * For e.g, itt could have been allocated when memory_hugetlb_accounting
4812 	 * was not selected.
4813 	 */
4814 	VM_WARN_ON_ONCE_FOLIO(!folio_test_hugetlb(old) && !memcg, old);
4815 	if (!memcg)
4816 		return;
4817 
4818 	/* Transfer the charge and the css ref */
4819 	commit_charge(new, memcg);
4820 
4821 	/* Warning should never happen, so don't worry about refcount non-0 */
4822 	WARN_ON_ONCE(folio_unqueue_deferred_split(old));
4823 	old->memcg_data = 0;
4824 }
4825 
4826 DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
4827 EXPORT_SYMBOL(memcg_sockets_enabled_key);
4828 
mem_cgroup_sk_alloc(struct sock * sk)4829 void mem_cgroup_sk_alloc(struct sock *sk)
4830 {
4831 	struct mem_cgroup *memcg;
4832 
4833 	if (!mem_cgroup_sockets_enabled)
4834 		return;
4835 
4836 	/* Do not associate the sock with unrelated interrupted task's memcg. */
4837 	if (!in_task())
4838 		return;
4839 
4840 	rcu_read_lock();
4841 	memcg = mem_cgroup_from_task(current);
4842 	if (mem_cgroup_is_root(memcg))
4843 		goto out;
4844 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg1_tcpmem_active(memcg))
4845 		goto out;
4846 	if (css_tryget(&memcg->css))
4847 		sk->sk_memcg = memcg;
4848 out:
4849 	rcu_read_unlock();
4850 }
4851 
mem_cgroup_sk_free(struct sock * sk)4852 void mem_cgroup_sk_free(struct sock *sk)
4853 {
4854 	if (sk->sk_memcg)
4855 		css_put(&sk->sk_memcg->css);
4856 }
4857 
4858 /**
4859  * mem_cgroup_charge_skmem - charge socket memory
4860  * @memcg: memcg to charge
4861  * @nr_pages: number of pages to charge
4862  * @gfp_mask: reclaim mode
4863  *
4864  * Charges @nr_pages to @memcg. Returns %true if the charge fit within
4865  * @memcg's configured limit, %false if it doesn't.
4866  */
mem_cgroup_charge_skmem(struct mem_cgroup * memcg,unsigned int nr_pages,gfp_t gfp_mask)4867 bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages,
4868 			     gfp_t gfp_mask)
4869 {
4870 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
4871 		return memcg1_charge_skmem(memcg, nr_pages, gfp_mask);
4872 
4873 	if (try_charge(memcg, gfp_mask, nr_pages) == 0) {
4874 		mod_memcg_state(memcg, MEMCG_SOCK, nr_pages);
4875 		return true;
4876 	}
4877 
4878 	return false;
4879 }
4880 
4881 /**
4882  * mem_cgroup_uncharge_skmem - uncharge socket memory
4883  * @memcg: memcg to uncharge
4884  * @nr_pages: number of pages to uncharge
4885  */
mem_cgroup_uncharge_skmem(struct mem_cgroup * memcg,unsigned int nr_pages)4886 void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
4887 {
4888 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
4889 		memcg1_uncharge_skmem(memcg, nr_pages);
4890 		return;
4891 	}
4892 
4893 	mod_memcg_state(memcg, MEMCG_SOCK, -nr_pages);
4894 
4895 	refill_stock(memcg, nr_pages);
4896 }
4897 
cgroup_memory(char * s)4898 static int __init cgroup_memory(char *s)
4899 {
4900 	char *token;
4901 
4902 	while ((token = strsep(&s, ",")) != NULL) {
4903 		if (!*token)
4904 			continue;
4905 		if (!strcmp(token, "nosocket"))
4906 			cgroup_memory_nosocket = true;
4907 		if (!strcmp(token, "nokmem"))
4908 			cgroup_memory_nokmem = true;
4909 		if (!strcmp(token, "nobpf"))
4910 			cgroup_memory_nobpf = true;
4911 	}
4912 	return 1;
4913 }
4914 __setup("cgroup.memory=", cgroup_memory);
4915 
4916 /*
4917  * subsys_initcall() for memory controller.
4918  *
4919  * Some parts like memcg_hotplug_cpu_dead() have to be initialized from this
4920  * context because of lock dependencies (cgroup_lock -> cpu hotplug) but
4921  * basically everything that doesn't depend on a specific mem_cgroup structure
4922  * should be initialized from here.
4923  */
mem_cgroup_init(void)4924 static int __init mem_cgroup_init(void)
4925 {
4926 	int cpu;
4927 
4928 	/*
4929 	 * Currently s32 type (can refer to struct batched_lruvec_stat) is
4930 	 * used for per-memcg-per-cpu caching of per-node statistics. In order
4931 	 * to work fine, we should make sure that the overfill threshold can't
4932 	 * exceed S32_MAX / PAGE_SIZE.
4933 	 */
4934 	BUILD_BUG_ON(MEMCG_CHARGE_BATCH > S32_MAX / PAGE_SIZE);
4935 
4936 	cpuhp_setup_state_nocalls(CPUHP_MM_MEMCQ_DEAD, "mm/memctrl:dead", NULL,
4937 				  memcg_hotplug_cpu_dead);
4938 
4939 	for_each_possible_cpu(cpu)
4940 		INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work,
4941 			  drain_local_stock);
4942 
4943 	return 0;
4944 }
4945 subsys_initcall(mem_cgroup_init);
4946 
4947 #ifdef CONFIG_SWAP
mem_cgroup_id_get_online(struct mem_cgroup * memcg)4948 static struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg)
4949 {
4950 	while (!refcount_inc_not_zero(&memcg->id.ref)) {
4951 		/*
4952 		 * The root cgroup cannot be destroyed, so it's refcount must
4953 		 * always be >= 1.
4954 		 */
4955 		if (WARN_ON_ONCE(mem_cgroup_is_root(memcg))) {
4956 			VM_BUG_ON(1);
4957 			break;
4958 		}
4959 		memcg = parent_mem_cgroup(memcg);
4960 		if (!memcg)
4961 			memcg = root_mem_cgroup;
4962 	}
4963 	return memcg;
4964 }
4965 
4966 /**
4967  * mem_cgroup_swapout - transfer a memsw charge to swap
4968  * @folio: folio whose memsw charge to transfer
4969  * @entry: swap entry to move the charge to
4970  *
4971  * Transfer the memsw charge of @folio to @entry.
4972  */
mem_cgroup_swapout(struct folio * folio,swp_entry_t entry)4973 void mem_cgroup_swapout(struct folio *folio, swp_entry_t entry)
4974 {
4975 	struct mem_cgroup *memcg, *swap_memcg;
4976 	unsigned int nr_entries;
4977 
4978 	VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
4979 	VM_BUG_ON_FOLIO(folio_ref_count(folio), folio);
4980 
4981 	if (mem_cgroup_disabled())
4982 		return;
4983 
4984 	if (!do_memsw_account())
4985 		return;
4986 
4987 	memcg = folio_memcg(folio);
4988 
4989 	VM_WARN_ON_ONCE_FOLIO(!memcg, folio);
4990 	if (!memcg)
4991 		return;
4992 
4993 	/*
4994 	 * In case the memcg owning these pages has been offlined and doesn't
4995 	 * have an ID allocated to it anymore, charge the closest online
4996 	 * ancestor for the swap instead and transfer the memory+swap charge.
4997 	 */
4998 	swap_memcg = mem_cgroup_id_get_online(memcg);
4999 	nr_entries = folio_nr_pages(folio);
5000 	/* Get references for the tail pages, too */
5001 	if (nr_entries > 1)
5002 		mem_cgroup_id_get_many(swap_memcg, nr_entries - 1);
5003 	mod_memcg_state(swap_memcg, MEMCG_SWAP, nr_entries);
5004 
5005 	swap_cgroup_record(folio, mem_cgroup_id(swap_memcg), entry);
5006 
5007 	folio_unqueue_deferred_split(folio);
5008 	folio->memcg_data = 0;
5009 
5010 	if (!mem_cgroup_is_root(memcg))
5011 		page_counter_uncharge(&memcg->memory, nr_entries);
5012 
5013 	if (memcg != swap_memcg) {
5014 		if (!mem_cgroup_is_root(swap_memcg))
5015 			page_counter_charge(&swap_memcg->memsw, nr_entries);
5016 		page_counter_uncharge(&memcg->memsw, nr_entries);
5017 	}
5018 
5019 	memcg1_swapout(folio, memcg);
5020 	css_put(&memcg->css);
5021 }
5022 
5023 /**
5024  * __mem_cgroup_try_charge_swap - try charging swap space for a folio
5025  * @folio: folio being added to swap
5026  * @entry: swap entry to charge
5027  *
5028  * Try to charge @folio's memcg for the swap space at @entry.
5029  *
5030  * Returns 0 on success, -ENOMEM on failure.
5031  */
__mem_cgroup_try_charge_swap(struct folio * folio,swp_entry_t entry)5032 int __mem_cgroup_try_charge_swap(struct folio *folio, swp_entry_t entry)
5033 {
5034 	unsigned int nr_pages = folio_nr_pages(folio);
5035 	struct page_counter *counter;
5036 	struct mem_cgroup *memcg;
5037 
5038 	if (do_memsw_account())
5039 		return 0;
5040 
5041 	memcg = folio_memcg(folio);
5042 
5043 	VM_WARN_ON_ONCE_FOLIO(!memcg, folio);
5044 	if (!memcg)
5045 		return 0;
5046 
5047 	if (!entry.val) {
5048 		memcg_memory_event(memcg, MEMCG_SWAP_FAIL);
5049 		return 0;
5050 	}
5051 
5052 	memcg = mem_cgroup_id_get_online(memcg);
5053 
5054 	if (!mem_cgroup_is_root(memcg) &&
5055 	    !page_counter_try_charge(&memcg->swap, nr_pages, &counter)) {
5056 		memcg_memory_event(memcg, MEMCG_SWAP_MAX);
5057 		memcg_memory_event(memcg, MEMCG_SWAP_FAIL);
5058 		mem_cgroup_id_put(memcg);
5059 		return -ENOMEM;
5060 	}
5061 
5062 	/* Get references for the tail pages, too */
5063 	if (nr_pages > 1)
5064 		mem_cgroup_id_get_many(memcg, nr_pages - 1);
5065 	mod_memcg_state(memcg, MEMCG_SWAP, nr_pages);
5066 
5067 	swap_cgroup_record(folio, mem_cgroup_id(memcg), entry);
5068 
5069 	return 0;
5070 }
5071 
5072 /**
5073  * __mem_cgroup_uncharge_swap - uncharge swap space
5074  * @entry: swap entry to uncharge
5075  * @nr_pages: the amount of swap space to uncharge
5076  */
__mem_cgroup_uncharge_swap(swp_entry_t entry,unsigned int nr_pages)5077 void __mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages)
5078 {
5079 	struct mem_cgroup *memcg;
5080 	unsigned short id;
5081 
5082 	id = swap_cgroup_clear(entry, nr_pages);
5083 	rcu_read_lock();
5084 	memcg = mem_cgroup_from_id(id);
5085 	if (memcg) {
5086 		if (!mem_cgroup_is_root(memcg)) {
5087 			if (do_memsw_account())
5088 				page_counter_uncharge(&memcg->memsw, nr_pages);
5089 			else
5090 				page_counter_uncharge(&memcg->swap, nr_pages);
5091 		}
5092 		mod_memcg_state(memcg, MEMCG_SWAP, -nr_pages);
5093 		mem_cgroup_id_put_many(memcg, nr_pages);
5094 	}
5095 	rcu_read_unlock();
5096 }
5097 
mem_cgroup_get_nr_swap_pages(struct mem_cgroup * memcg)5098 long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
5099 {
5100 	long nr_swap_pages = get_nr_swap_pages();
5101 
5102 	if (mem_cgroup_disabled() || do_memsw_account())
5103 		return nr_swap_pages;
5104 	for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg))
5105 		nr_swap_pages = min_t(long, nr_swap_pages,
5106 				      READ_ONCE(memcg->swap.max) -
5107 				      page_counter_read(&memcg->swap));
5108 	return nr_swap_pages;
5109 }
5110 
mem_cgroup_swap_full(struct folio * folio)5111 bool mem_cgroup_swap_full(struct folio *folio)
5112 {
5113 	struct mem_cgroup *memcg;
5114 
5115 	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
5116 
5117 	if (vm_swap_full())
5118 		return true;
5119 	if (do_memsw_account())
5120 		return false;
5121 
5122 	memcg = folio_memcg(folio);
5123 	if (!memcg)
5124 		return false;
5125 
5126 	for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg)) {
5127 		unsigned long usage = page_counter_read(&memcg->swap);
5128 
5129 		if (usage * 2 >= READ_ONCE(memcg->swap.high) ||
5130 		    usage * 2 >= READ_ONCE(memcg->swap.max))
5131 			return true;
5132 	}
5133 
5134 	return false;
5135 }
5136 
setup_swap_account(char * s)5137 static int __init setup_swap_account(char *s)
5138 {
5139 	bool res;
5140 
5141 	if (!kstrtobool(s, &res) && !res)
5142 		pr_warn_once("The swapaccount=0 commandline option is deprecated "
5143 			     "in favor of configuring swap control via cgroupfs. "
5144 			     "Please report your usecase to linux-mm@kvack.org if you "
5145 			     "depend on this functionality.\n");
5146 	return 1;
5147 }
5148 __setup("swapaccount=", setup_swap_account);
5149 
swap_current_read(struct cgroup_subsys_state * css,struct cftype * cft)5150 static u64 swap_current_read(struct cgroup_subsys_state *css,
5151 			     struct cftype *cft)
5152 {
5153 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5154 
5155 	return (u64)page_counter_read(&memcg->swap) * PAGE_SIZE;
5156 }
5157 
swap_peak_show(struct seq_file * sf,void * v)5158 static int swap_peak_show(struct seq_file *sf, void *v)
5159 {
5160 	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(sf));
5161 
5162 	return peak_show(sf, v, &memcg->swap);
5163 }
5164 
swap_peak_write(struct kernfs_open_file * of,char * buf,size_t nbytes,loff_t off)5165 static ssize_t swap_peak_write(struct kernfs_open_file *of, char *buf,
5166 			       size_t nbytes, loff_t off)
5167 {
5168 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
5169 
5170 	return peak_write(of, buf, nbytes, off, &memcg->swap,
5171 			  &memcg->swap_peaks);
5172 }
5173 
swap_high_show(struct seq_file * m,void * v)5174 static int swap_high_show(struct seq_file *m, void *v)
5175 {
5176 	return seq_puts_memcg_tunable(m,
5177 		READ_ONCE(mem_cgroup_from_seq(m)->swap.high));
5178 }
5179 
swap_high_write(struct kernfs_open_file * of,char * buf,size_t nbytes,loff_t off)5180 static ssize_t swap_high_write(struct kernfs_open_file *of,
5181 			       char *buf, size_t nbytes, loff_t off)
5182 {
5183 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
5184 	unsigned long high;
5185 	int err;
5186 
5187 	buf = strstrip(buf);
5188 	err = page_counter_memparse(buf, "max", &high);
5189 	if (err)
5190 		return err;
5191 
5192 	page_counter_set_high(&memcg->swap, high);
5193 
5194 	return nbytes;
5195 }
5196 
swap_max_show(struct seq_file * m,void * v)5197 static int swap_max_show(struct seq_file *m, void *v)
5198 {
5199 	return seq_puts_memcg_tunable(m,
5200 		READ_ONCE(mem_cgroup_from_seq(m)->swap.max));
5201 }
5202 
swap_max_write(struct kernfs_open_file * of,char * buf,size_t nbytes,loff_t off)5203 static ssize_t swap_max_write(struct kernfs_open_file *of,
5204 			      char *buf, size_t nbytes, loff_t off)
5205 {
5206 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
5207 	unsigned long max;
5208 	int err;
5209 
5210 	buf = strstrip(buf);
5211 	err = page_counter_memparse(buf, "max", &max);
5212 	if (err)
5213 		return err;
5214 
5215 	xchg(&memcg->swap.max, max);
5216 
5217 	return nbytes;
5218 }
5219 
swap_events_show(struct seq_file * m,void * v)5220 static int swap_events_show(struct seq_file *m, void *v)
5221 {
5222 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
5223 
5224 	seq_printf(m, "high %lu\n",
5225 		   atomic_long_read(&memcg->memory_events[MEMCG_SWAP_HIGH]));
5226 	seq_printf(m, "max %lu\n",
5227 		   atomic_long_read(&memcg->memory_events[MEMCG_SWAP_MAX]));
5228 	seq_printf(m, "fail %lu\n",
5229 		   atomic_long_read(&memcg->memory_events[MEMCG_SWAP_FAIL]));
5230 
5231 	return 0;
5232 }
5233 
5234 static struct cftype swap_files[] = {
5235 	{
5236 		.name = "swap.current",
5237 		.flags = CFTYPE_NOT_ON_ROOT,
5238 		.read_u64 = swap_current_read,
5239 	},
5240 	{
5241 		.name = "swap.high",
5242 		.flags = CFTYPE_NOT_ON_ROOT,
5243 		.seq_show = swap_high_show,
5244 		.write = swap_high_write,
5245 	},
5246 	{
5247 		.name = "swap.max",
5248 		.flags = CFTYPE_NOT_ON_ROOT,
5249 		.seq_show = swap_max_show,
5250 		.write = swap_max_write,
5251 	},
5252 	{
5253 		.name = "swap.peak",
5254 		.flags = CFTYPE_NOT_ON_ROOT,
5255 		.open = peak_open,
5256 		.release = peak_release,
5257 		.seq_show = swap_peak_show,
5258 		.write = swap_peak_write,
5259 	},
5260 	{
5261 		.name = "swap.events",
5262 		.flags = CFTYPE_NOT_ON_ROOT,
5263 		.file_offset = offsetof(struct mem_cgroup, swap_events_file),
5264 		.seq_show = swap_events_show,
5265 	},
5266 	{ }	/* terminate */
5267 };
5268 
5269 #ifdef CONFIG_ZSWAP
5270 /**
5271  * obj_cgroup_may_zswap - check if this cgroup can zswap
5272  * @objcg: the object cgroup
5273  *
5274  * Check if the hierarchical zswap limit has been reached.
5275  *
5276  * This doesn't check for specific headroom, and it is not atomic
5277  * either. But with zswap, the size of the allocation is only known
5278  * once compression has occurred, and this optimistic pre-check avoids
5279  * spending cycles on compression when there is already no room left
5280  * or zswap is disabled altogether somewhere in the hierarchy.
5281  */
obj_cgroup_may_zswap(struct obj_cgroup * objcg)5282 bool obj_cgroup_may_zswap(struct obj_cgroup *objcg)
5283 {
5284 	struct mem_cgroup *memcg, *original_memcg;
5285 	bool ret = true;
5286 
5287 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
5288 		return true;
5289 
5290 	original_memcg = get_mem_cgroup_from_objcg(objcg);
5291 	for (memcg = original_memcg; !mem_cgroup_is_root(memcg);
5292 	     memcg = parent_mem_cgroup(memcg)) {
5293 		unsigned long max = READ_ONCE(memcg->zswap_max);
5294 		unsigned long pages;
5295 
5296 		if (max == PAGE_COUNTER_MAX)
5297 			continue;
5298 		if (max == 0) {
5299 			ret = false;
5300 			break;
5301 		}
5302 
5303 		/* Force flush to get accurate stats for charging */
5304 		__mem_cgroup_flush_stats(memcg, true);
5305 		pages = memcg_page_state(memcg, MEMCG_ZSWAP_B) / PAGE_SIZE;
5306 		if (pages < max)
5307 			continue;
5308 		ret = false;
5309 		break;
5310 	}
5311 	mem_cgroup_put(original_memcg);
5312 	return ret;
5313 }
5314 
5315 /**
5316  * obj_cgroup_charge_zswap - charge compression backend memory
5317  * @objcg: the object cgroup
5318  * @size: size of compressed object
5319  *
5320  * This forces the charge after obj_cgroup_may_zswap() allowed
5321  * compression and storage in zwap for this cgroup to go ahead.
5322  */
obj_cgroup_charge_zswap(struct obj_cgroup * objcg,size_t size)5323 void obj_cgroup_charge_zswap(struct obj_cgroup *objcg, size_t size)
5324 {
5325 	struct mem_cgroup *memcg;
5326 
5327 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
5328 		return;
5329 
5330 	VM_WARN_ON_ONCE(!(current->flags & PF_MEMALLOC));
5331 
5332 	/* PF_MEMALLOC context, charging must succeed */
5333 	if (obj_cgroup_charge(objcg, GFP_KERNEL, size))
5334 		VM_WARN_ON_ONCE(1);
5335 
5336 	rcu_read_lock();
5337 	memcg = obj_cgroup_memcg(objcg);
5338 	mod_memcg_state(memcg, MEMCG_ZSWAP_B, size);
5339 	mod_memcg_state(memcg, MEMCG_ZSWAPPED, 1);
5340 	rcu_read_unlock();
5341 }
5342 
5343 /**
5344  * obj_cgroup_uncharge_zswap - uncharge compression backend memory
5345  * @objcg: the object cgroup
5346  * @size: size of compressed object
5347  *
5348  * Uncharges zswap memory on page in.
5349  */
obj_cgroup_uncharge_zswap(struct obj_cgroup * objcg,size_t size)5350 void obj_cgroup_uncharge_zswap(struct obj_cgroup *objcg, size_t size)
5351 {
5352 	struct mem_cgroup *memcg;
5353 
5354 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
5355 		return;
5356 
5357 	obj_cgroup_uncharge(objcg, size);
5358 
5359 	rcu_read_lock();
5360 	memcg = obj_cgroup_memcg(objcg);
5361 	mod_memcg_state(memcg, MEMCG_ZSWAP_B, -size);
5362 	mod_memcg_state(memcg, MEMCG_ZSWAPPED, -1);
5363 	rcu_read_unlock();
5364 }
5365 
mem_cgroup_zswap_writeback_enabled(struct mem_cgroup * memcg)5366 bool mem_cgroup_zswap_writeback_enabled(struct mem_cgroup *memcg)
5367 {
5368 	/* if zswap is disabled, do not block pages going to the swapping device */
5369 	if (!zswap_is_enabled())
5370 		return true;
5371 
5372 	for (; memcg; memcg = parent_mem_cgroup(memcg))
5373 		if (!READ_ONCE(memcg->zswap_writeback))
5374 			return false;
5375 
5376 	return true;
5377 }
5378 
zswap_current_read(struct cgroup_subsys_state * css,struct cftype * cft)5379 static u64 zswap_current_read(struct cgroup_subsys_state *css,
5380 			      struct cftype *cft)
5381 {
5382 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5383 
5384 	mem_cgroup_flush_stats(memcg);
5385 	return memcg_page_state(memcg, MEMCG_ZSWAP_B);
5386 }
5387 
zswap_max_show(struct seq_file * m,void * v)5388 static int zswap_max_show(struct seq_file *m, void *v)
5389 {
5390 	return seq_puts_memcg_tunable(m,
5391 		READ_ONCE(mem_cgroup_from_seq(m)->zswap_max));
5392 }
5393 
zswap_max_write(struct kernfs_open_file * of,char * buf,size_t nbytes,loff_t off)5394 static ssize_t zswap_max_write(struct kernfs_open_file *of,
5395 			       char *buf, size_t nbytes, loff_t off)
5396 {
5397 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
5398 	unsigned long max;
5399 	int err;
5400 
5401 	buf = strstrip(buf);
5402 	err = page_counter_memparse(buf, "max", &max);
5403 	if (err)
5404 		return err;
5405 
5406 	xchg(&memcg->zswap_max, max);
5407 
5408 	return nbytes;
5409 }
5410 
zswap_writeback_show(struct seq_file * m,void * v)5411 static int zswap_writeback_show(struct seq_file *m, void *v)
5412 {
5413 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
5414 
5415 	seq_printf(m, "%d\n", READ_ONCE(memcg->zswap_writeback));
5416 	return 0;
5417 }
5418 
zswap_writeback_write(struct kernfs_open_file * of,char * buf,size_t nbytes,loff_t off)5419 static ssize_t zswap_writeback_write(struct kernfs_open_file *of,
5420 				char *buf, size_t nbytes, loff_t off)
5421 {
5422 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
5423 	int zswap_writeback;
5424 	ssize_t parse_ret = kstrtoint(strstrip(buf), 0, &zswap_writeback);
5425 
5426 	if (parse_ret)
5427 		return parse_ret;
5428 
5429 	if (zswap_writeback != 0 && zswap_writeback != 1)
5430 		return -EINVAL;
5431 
5432 	WRITE_ONCE(memcg->zswap_writeback, zswap_writeback);
5433 	return nbytes;
5434 }
5435 
5436 static struct cftype zswap_files[] = {
5437 	{
5438 		.name = "zswap.current",
5439 		.flags = CFTYPE_NOT_ON_ROOT,
5440 		.read_u64 = zswap_current_read,
5441 	},
5442 	{
5443 		.name = "zswap.max",
5444 		.flags = CFTYPE_NOT_ON_ROOT,
5445 		.seq_show = zswap_max_show,
5446 		.write = zswap_max_write,
5447 	},
5448 	{
5449 		.name = "zswap.writeback",
5450 		.seq_show = zswap_writeback_show,
5451 		.write = zswap_writeback_write,
5452 	},
5453 	{ }	/* terminate */
5454 };
5455 #endif /* CONFIG_ZSWAP */
5456 
mem_cgroup_swap_init(void)5457 static int __init mem_cgroup_swap_init(void)
5458 {
5459 	if (mem_cgroup_disabled())
5460 		return 0;
5461 
5462 	WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys, swap_files));
5463 #ifdef CONFIG_MEMCG_V1
5464 	WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys, memsw_files));
5465 #endif
5466 #ifdef CONFIG_ZSWAP
5467 	WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys, zswap_files));
5468 #endif
5469 	return 0;
5470 }
5471 subsys_initcall(mem_cgroup_swap_init);
5472 
5473 #endif /* CONFIG_SWAP */
5474