xref: /linux/mm/memcontrol.c (revision 3f31a806a62e44f7498e2d17719c03f816553f11)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /* memcontrol.c - Memory Controller
3  *
4  * Copyright IBM Corporation, 2007
5  * Author Balbir Singh <balbir@linux.vnet.ibm.com>
6  *
7  * Copyright 2007 OpenVZ SWsoft Inc
8  * Author: Pavel Emelianov <xemul@openvz.org>
9  *
10  * Memory thresholds
11  * Copyright (C) 2009 Nokia Corporation
12  * Author: Kirill A. Shutemov
13  *
14  * Kernel Memory Controller
15  * Copyright (C) 2012 Parallels Inc. and Google Inc.
16  * Authors: Glauber Costa and Suleiman Souhlal
17  *
18  * Native page reclaim
19  * Charge lifetime sanitation
20  * Lockless page tracking & accounting
21  * Unified hierarchy configuration model
22  * Copyright (C) 2015 Red Hat, Inc., Johannes Weiner
23  *
24  * Per memcg lru locking
25  * Copyright (C) 2020 Alibaba, Inc, Alex Shi
26  */
27 
28 #include <linux/cgroup-defs.h>
29 #include <linux/page_counter.h>
30 #include <linux/memcontrol.h>
31 #include <linux/cgroup.h>
32 #include <linux/cpuset.h>
33 #include <linux/sched/mm.h>
34 #include <linux/shmem_fs.h>
35 #include <linux/hugetlb.h>
36 #include <linux/pagemap.h>
37 #include <linux/pagevec.h>
38 #include <linux/vm_event_item.h>
39 #include <linux/smp.h>
40 #include <linux/page-flags.h>
41 #include <linux/backing-dev.h>
42 #include <linux/bit_spinlock.h>
43 #include <linux/rcupdate.h>
44 #include <linux/limits.h>
45 #include <linux/export.h>
46 #include <linux/list.h>
47 #include <linux/mutex.h>
48 #include <linux/rbtree.h>
49 #include <linux/slab.h>
50 #include <linux/swapops.h>
51 #include <linux/spinlock.h>
52 #include <linux/fs.h>
53 #include <linux/seq_file.h>
54 #include <linux/parser.h>
55 #include <linux/vmpressure.h>
56 #include <linux/memremap.h>
57 #include <linux/mm_inline.h>
58 #include <linux/swap_cgroup.h>
59 #include <linux/cpu.h>
60 #include <linux/oom.h>
61 #include <linux/lockdep.h>
62 #include <linux/resume_user_mode.h>
63 #include <linux/psi.h>
64 #include <linux/seq_buf.h>
65 #include <linux/sched/isolation.h>
66 #include <linux/kmemleak.h>
67 #include "internal.h"
68 #include <net/sock.h>
69 #include <net/ip.h>
70 #include "slab.h"
71 #include "memcontrol-v1.h"
72 
73 #include <linux/uaccess.h>
74 
75 #define CREATE_TRACE_POINTS
76 #include <trace/events/memcg.h>
77 #undef CREATE_TRACE_POINTS
78 
79 #include <trace/events/vmscan.h>
80 
81 struct cgroup_subsys memory_cgrp_subsys __read_mostly;
82 EXPORT_SYMBOL(memory_cgrp_subsys);
83 
84 struct mem_cgroup *root_mem_cgroup __read_mostly;
85 
86 /* Active memory cgroup to use from an interrupt context */
87 DEFINE_PER_CPU(struct mem_cgroup *, int_active_memcg);
88 EXPORT_PER_CPU_SYMBOL_GPL(int_active_memcg);
89 
90 /* Socket memory accounting disabled? */
91 static bool cgroup_memory_nosocket __ro_after_init;
92 
93 /* Kernel memory accounting disabled? */
94 static bool cgroup_memory_nokmem __ro_after_init;
95 
96 /* BPF memory accounting disabled? */
97 static bool cgroup_memory_nobpf __ro_after_init;
98 
99 static struct kmem_cache *memcg_cachep;
100 static struct kmem_cache *memcg_pn_cachep;
101 
102 #ifdef CONFIG_CGROUP_WRITEBACK
103 static DECLARE_WAIT_QUEUE_HEAD(memcg_cgwb_frn_waitq);
104 #endif
105 
task_is_dying(void)106 static inline bool task_is_dying(void)
107 {
108 	return tsk_is_oom_victim(current) || fatal_signal_pending(current) ||
109 		(current->flags & PF_EXITING);
110 }
111 
112 /* Some nice accessors for the vmpressure. */
memcg_to_vmpressure(struct mem_cgroup * memcg)113 struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
114 {
115 	if (!memcg)
116 		memcg = root_mem_cgroup;
117 	return &memcg->vmpressure;
118 }
119 
vmpressure_to_memcg(struct vmpressure * vmpr)120 struct mem_cgroup *vmpressure_to_memcg(struct vmpressure *vmpr)
121 {
122 	return container_of(vmpr, struct mem_cgroup, vmpressure);
123 }
124 
125 #define SEQ_BUF_SIZE SZ_4K
126 #define CURRENT_OBJCG_UPDATE_BIT 0
127 #define CURRENT_OBJCG_UPDATE_FLAG (1UL << CURRENT_OBJCG_UPDATE_BIT)
128 
129 static DEFINE_SPINLOCK(objcg_lock);
130 
mem_cgroup_kmem_disabled(void)131 bool mem_cgroup_kmem_disabled(void)
132 {
133 	return cgroup_memory_nokmem;
134 }
135 
136 static void memcg_uncharge(struct mem_cgroup *memcg, unsigned int nr_pages);
137 
obj_cgroup_release(struct percpu_ref * ref)138 static void obj_cgroup_release(struct percpu_ref *ref)
139 {
140 	struct obj_cgroup *objcg = container_of(ref, struct obj_cgroup, refcnt);
141 	unsigned int nr_bytes;
142 	unsigned int nr_pages;
143 	unsigned long flags;
144 
145 	/*
146 	 * At this point all allocated objects are freed, and
147 	 * objcg->nr_charged_bytes can't have an arbitrary byte value.
148 	 * However, it can be PAGE_SIZE or (x * PAGE_SIZE).
149 	 *
150 	 * The following sequence can lead to it:
151 	 * 1) CPU0: objcg == stock->cached_objcg
152 	 * 2) CPU1: we do a small allocation (e.g. 92 bytes),
153 	 *          PAGE_SIZE bytes are charged
154 	 * 3) CPU1: a process from another memcg is allocating something,
155 	 *          the stock if flushed,
156 	 *          objcg->nr_charged_bytes = PAGE_SIZE - 92
157 	 * 5) CPU0: we do release this object,
158 	 *          92 bytes are added to stock->nr_bytes
159 	 * 6) CPU0: stock is flushed,
160 	 *          92 bytes are added to objcg->nr_charged_bytes
161 	 *
162 	 * In the result, nr_charged_bytes == PAGE_SIZE.
163 	 * This page will be uncharged in obj_cgroup_release().
164 	 */
165 	nr_bytes = atomic_read(&objcg->nr_charged_bytes);
166 	WARN_ON_ONCE(nr_bytes & (PAGE_SIZE - 1));
167 	nr_pages = nr_bytes >> PAGE_SHIFT;
168 
169 	if (nr_pages) {
170 		struct mem_cgroup *memcg;
171 
172 		memcg = get_mem_cgroup_from_objcg(objcg);
173 		mod_memcg_state(memcg, MEMCG_KMEM, -nr_pages);
174 		memcg1_account_kmem(memcg, -nr_pages);
175 		if (!mem_cgroup_is_root(memcg))
176 			memcg_uncharge(memcg, nr_pages);
177 		mem_cgroup_put(memcg);
178 	}
179 
180 	spin_lock_irqsave(&objcg_lock, flags);
181 	list_del(&objcg->list);
182 	spin_unlock_irqrestore(&objcg_lock, flags);
183 
184 	percpu_ref_exit(ref);
185 	kfree_rcu(objcg, rcu);
186 }
187 
obj_cgroup_alloc(void)188 static struct obj_cgroup *obj_cgroup_alloc(void)
189 {
190 	struct obj_cgroup *objcg;
191 	int ret;
192 
193 	objcg = kzalloc(sizeof(struct obj_cgroup), GFP_KERNEL);
194 	if (!objcg)
195 		return NULL;
196 
197 	ret = percpu_ref_init(&objcg->refcnt, obj_cgroup_release, 0,
198 			      GFP_KERNEL);
199 	if (ret) {
200 		kfree(objcg);
201 		return NULL;
202 	}
203 	INIT_LIST_HEAD(&objcg->list);
204 	return objcg;
205 }
206 
memcg_reparent_objcgs(struct mem_cgroup * memcg,struct mem_cgroup * parent)207 static void memcg_reparent_objcgs(struct mem_cgroup *memcg,
208 				  struct mem_cgroup *parent)
209 {
210 	struct obj_cgroup *objcg, *iter;
211 
212 	objcg = rcu_replace_pointer(memcg->objcg, NULL, true);
213 
214 	spin_lock_irq(&objcg_lock);
215 
216 	/* 1) Ready to reparent active objcg. */
217 	list_add(&objcg->list, &memcg->objcg_list);
218 	/* 2) Reparent active objcg and already reparented objcgs to parent. */
219 	list_for_each_entry(iter, &memcg->objcg_list, list)
220 		WRITE_ONCE(iter->memcg, parent);
221 	/* 3) Move already reparented objcgs to the parent's list */
222 	list_splice(&memcg->objcg_list, &parent->objcg_list);
223 
224 	spin_unlock_irq(&objcg_lock);
225 
226 	percpu_ref_kill(&objcg->refcnt);
227 }
228 
229 /*
230  * A lot of the calls to the cache allocation functions are expected to be
231  * inlined by the compiler. Since the calls to memcg_slab_post_alloc_hook() are
232  * conditional to this static branch, we'll have to allow modules that does
233  * kmem_cache_alloc and the such to see this symbol as well
234  */
235 DEFINE_STATIC_KEY_FALSE(memcg_kmem_online_key);
236 EXPORT_SYMBOL(memcg_kmem_online_key);
237 
238 DEFINE_STATIC_KEY_FALSE(memcg_bpf_enabled_key);
239 EXPORT_SYMBOL(memcg_bpf_enabled_key);
240 
241 /**
242  * mem_cgroup_css_from_folio - css of the memcg associated with a folio
243  * @folio: folio of interest
244  *
245  * If memcg is bound to the default hierarchy, css of the memcg associated
246  * with @folio is returned.  The returned css remains associated with @folio
247  * until it is released.
248  *
249  * If memcg is bound to a traditional hierarchy, the css of root_mem_cgroup
250  * is returned.
251  */
mem_cgroup_css_from_folio(struct folio * folio)252 struct cgroup_subsys_state *mem_cgroup_css_from_folio(struct folio *folio)
253 {
254 	struct mem_cgroup *memcg = folio_memcg(folio);
255 
256 	if (!memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
257 		memcg = root_mem_cgroup;
258 
259 	return &memcg->css;
260 }
261 
262 /**
263  * page_cgroup_ino - return inode number of the memcg a page is charged to
264  * @page: the page
265  *
266  * Look up the closest online ancestor of the memory cgroup @page is charged to
267  * and return its inode number or 0 if @page is not charged to any cgroup. It
268  * is safe to call this function without holding a reference to @page.
269  *
270  * Note, this function is inherently racy, because there is nothing to prevent
271  * the cgroup inode from getting torn down and potentially reallocated a moment
272  * after page_cgroup_ino() returns, so it only should be used by callers that
273  * do not care (such as procfs interfaces).
274  */
page_cgroup_ino(struct page * page)275 ino_t page_cgroup_ino(struct page *page)
276 {
277 	struct mem_cgroup *memcg;
278 	unsigned long ino = 0;
279 
280 	rcu_read_lock();
281 	/* page_folio() is racy here, but the entire function is racy anyway */
282 	memcg = folio_memcg_check(page_folio(page));
283 
284 	while (memcg && !(memcg->css.flags & CSS_ONLINE))
285 		memcg = parent_mem_cgroup(memcg);
286 	if (memcg)
287 		ino = cgroup_ino(memcg->css.cgroup);
288 	rcu_read_unlock();
289 	return ino;
290 }
291 
292 /* Subset of node_stat_item for memcg stats */
293 static const unsigned int memcg_node_stat_items[] = {
294 	NR_INACTIVE_ANON,
295 	NR_ACTIVE_ANON,
296 	NR_INACTIVE_FILE,
297 	NR_ACTIVE_FILE,
298 	NR_UNEVICTABLE,
299 	NR_SLAB_RECLAIMABLE_B,
300 	NR_SLAB_UNRECLAIMABLE_B,
301 	WORKINGSET_REFAULT_ANON,
302 	WORKINGSET_REFAULT_FILE,
303 	WORKINGSET_ACTIVATE_ANON,
304 	WORKINGSET_ACTIVATE_FILE,
305 	WORKINGSET_RESTORE_ANON,
306 	WORKINGSET_RESTORE_FILE,
307 	WORKINGSET_NODERECLAIM,
308 	NR_ANON_MAPPED,
309 	NR_FILE_MAPPED,
310 	NR_FILE_PAGES,
311 	NR_FILE_DIRTY,
312 	NR_WRITEBACK,
313 	NR_SHMEM,
314 	NR_SHMEM_THPS,
315 	NR_FILE_THPS,
316 	NR_ANON_THPS,
317 	NR_KERNEL_STACK_KB,
318 	NR_PAGETABLE,
319 	NR_SECONDARY_PAGETABLE,
320 #ifdef CONFIG_SWAP
321 	NR_SWAPCACHE,
322 #endif
323 #ifdef CONFIG_NUMA_BALANCING
324 	PGPROMOTE_SUCCESS,
325 #endif
326 	PGDEMOTE_KSWAPD,
327 	PGDEMOTE_DIRECT,
328 	PGDEMOTE_KHUGEPAGED,
329 	PGDEMOTE_PROACTIVE,
330 #ifdef CONFIG_HUGETLB_PAGE
331 	NR_HUGETLB,
332 #endif
333 };
334 
335 static const unsigned int memcg_stat_items[] = {
336 	MEMCG_SWAP,
337 	MEMCG_SOCK,
338 	MEMCG_PERCPU_B,
339 	MEMCG_VMALLOC,
340 	MEMCG_KMEM,
341 	MEMCG_ZSWAP_B,
342 	MEMCG_ZSWAPPED,
343 };
344 
345 #define NR_MEMCG_NODE_STAT_ITEMS ARRAY_SIZE(memcg_node_stat_items)
346 #define MEMCG_VMSTAT_SIZE (NR_MEMCG_NODE_STAT_ITEMS + \
347 			   ARRAY_SIZE(memcg_stat_items))
348 #define BAD_STAT_IDX(index) ((u32)(index) >= U8_MAX)
349 static u8 mem_cgroup_stats_index[MEMCG_NR_STAT] __read_mostly;
350 
init_memcg_stats(void)351 static void init_memcg_stats(void)
352 {
353 	u8 i, j = 0;
354 
355 	BUILD_BUG_ON(MEMCG_NR_STAT >= U8_MAX);
356 
357 	memset(mem_cgroup_stats_index, U8_MAX, sizeof(mem_cgroup_stats_index));
358 
359 	for (i = 0; i < NR_MEMCG_NODE_STAT_ITEMS; ++i, ++j)
360 		mem_cgroup_stats_index[memcg_node_stat_items[i]] = j;
361 
362 	for (i = 0; i < ARRAY_SIZE(memcg_stat_items); ++i, ++j)
363 		mem_cgroup_stats_index[memcg_stat_items[i]] = j;
364 }
365 
memcg_stats_index(int idx)366 static inline int memcg_stats_index(int idx)
367 {
368 	return mem_cgroup_stats_index[idx];
369 }
370 
371 struct lruvec_stats_percpu {
372 	/* Local (CPU and cgroup) state */
373 	long state[NR_MEMCG_NODE_STAT_ITEMS];
374 
375 	/* Delta calculation for lockless upward propagation */
376 	long state_prev[NR_MEMCG_NODE_STAT_ITEMS];
377 };
378 
379 struct lruvec_stats {
380 	/* Aggregated (CPU and subtree) state */
381 	long state[NR_MEMCG_NODE_STAT_ITEMS];
382 
383 	/* Non-hierarchical (CPU aggregated) state */
384 	long state_local[NR_MEMCG_NODE_STAT_ITEMS];
385 
386 	/* Pending child counts during tree propagation */
387 	long state_pending[NR_MEMCG_NODE_STAT_ITEMS];
388 };
389 
lruvec_page_state(struct lruvec * lruvec,enum node_stat_item idx)390 unsigned long lruvec_page_state(struct lruvec *lruvec, enum node_stat_item idx)
391 {
392 	struct mem_cgroup_per_node *pn;
393 	long x;
394 	int i;
395 
396 	if (mem_cgroup_disabled())
397 		return node_page_state(lruvec_pgdat(lruvec), idx);
398 
399 	i = memcg_stats_index(idx);
400 	if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx))
401 		return 0;
402 
403 	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
404 	x = READ_ONCE(pn->lruvec_stats->state[i]);
405 #ifdef CONFIG_SMP
406 	if (x < 0)
407 		x = 0;
408 #endif
409 	return x;
410 }
411 
lruvec_page_state_local(struct lruvec * lruvec,enum node_stat_item idx)412 unsigned long lruvec_page_state_local(struct lruvec *lruvec,
413 				      enum node_stat_item idx)
414 {
415 	struct mem_cgroup_per_node *pn;
416 	long x;
417 	int i;
418 
419 	if (mem_cgroup_disabled())
420 		return node_page_state(lruvec_pgdat(lruvec), idx);
421 
422 	i = memcg_stats_index(idx);
423 	if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx))
424 		return 0;
425 
426 	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
427 	x = READ_ONCE(pn->lruvec_stats->state_local[i]);
428 #ifdef CONFIG_SMP
429 	if (x < 0)
430 		x = 0;
431 #endif
432 	return x;
433 }
434 
435 /* Subset of vm_event_item to report for memcg event stats */
436 static const unsigned int memcg_vm_event_stat[] = {
437 #ifdef CONFIG_MEMCG_V1
438 	PGPGIN,
439 	PGPGOUT,
440 #endif
441 	PSWPIN,
442 	PSWPOUT,
443 	PGSCAN_KSWAPD,
444 	PGSCAN_DIRECT,
445 	PGSCAN_KHUGEPAGED,
446 	PGSCAN_PROACTIVE,
447 	PGSTEAL_KSWAPD,
448 	PGSTEAL_DIRECT,
449 	PGSTEAL_KHUGEPAGED,
450 	PGSTEAL_PROACTIVE,
451 	PGFAULT,
452 	PGMAJFAULT,
453 	PGREFILL,
454 	PGACTIVATE,
455 	PGDEACTIVATE,
456 	PGLAZYFREE,
457 	PGLAZYFREED,
458 #ifdef CONFIG_SWAP
459 	SWPIN_ZERO,
460 	SWPOUT_ZERO,
461 #endif
462 #ifdef CONFIG_ZSWAP
463 	ZSWPIN,
464 	ZSWPOUT,
465 	ZSWPWB,
466 #endif
467 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
468 	THP_FAULT_ALLOC,
469 	THP_COLLAPSE_ALLOC,
470 	THP_SWPOUT,
471 	THP_SWPOUT_FALLBACK,
472 #endif
473 #ifdef CONFIG_NUMA_BALANCING
474 	NUMA_PAGE_MIGRATE,
475 	NUMA_PTE_UPDATES,
476 	NUMA_HINT_FAULTS,
477 #endif
478 };
479 
480 #define NR_MEMCG_EVENTS ARRAY_SIZE(memcg_vm_event_stat)
481 static u8 mem_cgroup_events_index[NR_VM_EVENT_ITEMS] __read_mostly;
482 
init_memcg_events(void)483 static void init_memcg_events(void)
484 {
485 	u8 i;
486 
487 	BUILD_BUG_ON(NR_VM_EVENT_ITEMS >= U8_MAX);
488 
489 	memset(mem_cgroup_events_index, U8_MAX,
490 	       sizeof(mem_cgroup_events_index));
491 
492 	for (i = 0; i < NR_MEMCG_EVENTS; ++i)
493 		mem_cgroup_events_index[memcg_vm_event_stat[i]] = i;
494 }
495 
memcg_events_index(enum vm_event_item idx)496 static inline int memcg_events_index(enum vm_event_item idx)
497 {
498 	return mem_cgroup_events_index[idx];
499 }
500 
501 struct memcg_vmstats_percpu {
502 	/* Stats updates since the last flush */
503 	unsigned int			stats_updates;
504 
505 	/* Cached pointers for fast iteration in memcg_rstat_updated() */
506 	struct memcg_vmstats_percpu __percpu	*parent_pcpu;
507 	struct memcg_vmstats			*vmstats;
508 
509 	/* The above should fit a single cacheline for memcg_rstat_updated() */
510 
511 	/* Local (CPU and cgroup) page state & events */
512 	long			state[MEMCG_VMSTAT_SIZE];
513 	unsigned long		events[NR_MEMCG_EVENTS];
514 
515 	/* Delta calculation for lockless upward propagation */
516 	long			state_prev[MEMCG_VMSTAT_SIZE];
517 	unsigned long		events_prev[NR_MEMCG_EVENTS];
518 } ____cacheline_aligned;
519 
520 struct memcg_vmstats {
521 	/* Aggregated (CPU and subtree) page state & events */
522 	long			state[MEMCG_VMSTAT_SIZE];
523 	unsigned long		events[NR_MEMCG_EVENTS];
524 
525 	/* Non-hierarchical (CPU aggregated) page state & events */
526 	long			state_local[MEMCG_VMSTAT_SIZE];
527 	unsigned long		events_local[NR_MEMCG_EVENTS];
528 
529 	/* Pending child counts during tree propagation */
530 	long			state_pending[MEMCG_VMSTAT_SIZE];
531 	unsigned long		events_pending[NR_MEMCG_EVENTS];
532 
533 	/* Stats updates since the last flush */
534 	atomic_t		stats_updates;
535 };
536 
537 /*
538  * memcg and lruvec stats flushing
539  *
540  * Many codepaths leading to stats update or read are performance sensitive and
541  * adding stats flushing in such codepaths is not desirable. So, to optimize the
542  * flushing the kernel does:
543  *
544  * 1) Periodically and asynchronously flush the stats every 2 seconds to not let
545  *    rstat update tree grow unbounded.
546  *
547  * 2) Flush the stats synchronously on reader side only when there are more than
548  *    (MEMCG_CHARGE_BATCH * nr_cpus) update events. Though this optimization
549  *    will let stats be out of sync by atmost (MEMCG_CHARGE_BATCH * nr_cpus) but
550  *    only for 2 seconds due to (1).
551  */
552 static void flush_memcg_stats_dwork(struct work_struct *w);
553 static DECLARE_DEFERRABLE_WORK(stats_flush_dwork, flush_memcg_stats_dwork);
554 static u64 flush_last_time;
555 
556 #define FLUSH_TIME (2UL*HZ)
557 
memcg_vmstats_needs_flush(struct memcg_vmstats * vmstats)558 static bool memcg_vmstats_needs_flush(struct memcg_vmstats *vmstats)
559 {
560 	return atomic_read(&vmstats->stats_updates) >
561 		MEMCG_CHARGE_BATCH * num_online_cpus();
562 }
563 
memcg_rstat_updated(struct mem_cgroup * memcg,int val,int cpu)564 static inline void memcg_rstat_updated(struct mem_cgroup *memcg, int val,
565 				       int cpu)
566 {
567 	struct memcg_vmstats_percpu __percpu *statc_pcpu;
568 	struct memcg_vmstats_percpu *statc;
569 	unsigned int stats_updates;
570 
571 	if (!val)
572 		return;
573 
574 	/* TODO: add to cgroup update tree once it is nmi-safe. */
575 	if (!in_nmi())
576 		css_rstat_updated(&memcg->css, cpu);
577 	statc_pcpu = memcg->vmstats_percpu;
578 	for (; statc_pcpu; statc_pcpu = statc->parent_pcpu) {
579 		statc = this_cpu_ptr(statc_pcpu);
580 		/*
581 		 * If @memcg is already flushable then all its ancestors are
582 		 * flushable as well and also there is no need to increase
583 		 * stats_updates.
584 		 */
585 		if (memcg_vmstats_needs_flush(statc->vmstats))
586 			break;
587 
588 		stats_updates = this_cpu_add_return(statc_pcpu->stats_updates,
589 						    abs(val));
590 		if (stats_updates < MEMCG_CHARGE_BATCH)
591 			continue;
592 
593 		stats_updates = this_cpu_xchg(statc_pcpu->stats_updates, 0);
594 		atomic_add(stats_updates, &statc->vmstats->stats_updates);
595 	}
596 }
597 
__mem_cgroup_flush_stats(struct mem_cgroup * memcg,bool force)598 static void __mem_cgroup_flush_stats(struct mem_cgroup *memcg, bool force)
599 {
600 	bool needs_flush = memcg_vmstats_needs_flush(memcg->vmstats);
601 
602 	trace_memcg_flush_stats(memcg, atomic_read(&memcg->vmstats->stats_updates),
603 		force, needs_flush);
604 
605 	if (!force && !needs_flush)
606 		return;
607 
608 	if (mem_cgroup_is_root(memcg))
609 		WRITE_ONCE(flush_last_time, jiffies_64);
610 
611 	css_rstat_flush(&memcg->css);
612 }
613 
614 /*
615  * mem_cgroup_flush_stats - flush the stats of a memory cgroup subtree
616  * @memcg: root of the subtree to flush
617  *
618  * Flushing is serialized by the underlying global rstat lock. There is also a
619  * minimum amount of work to be done even if there are no stat updates to flush.
620  * Hence, we only flush the stats if the updates delta exceeds a threshold. This
621  * avoids unnecessary work and contention on the underlying lock.
622  */
mem_cgroup_flush_stats(struct mem_cgroup * memcg)623 void mem_cgroup_flush_stats(struct mem_cgroup *memcg)
624 {
625 	if (mem_cgroup_disabled())
626 		return;
627 
628 	if (!memcg)
629 		memcg = root_mem_cgroup;
630 
631 	__mem_cgroup_flush_stats(memcg, false);
632 }
633 
mem_cgroup_flush_stats_ratelimited(struct mem_cgroup * memcg)634 void mem_cgroup_flush_stats_ratelimited(struct mem_cgroup *memcg)
635 {
636 	/* Only flush if the periodic flusher is one full cycle late */
637 	if (time_after64(jiffies_64, READ_ONCE(flush_last_time) + 2*FLUSH_TIME))
638 		mem_cgroup_flush_stats(memcg);
639 }
640 
flush_memcg_stats_dwork(struct work_struct * w)641 static void flush_memcg_stats_dwork(struct work_struct *w)
642 {
643 	/*
644 	 * Deliberately ignore memcg_vmstats_needs_flush() here so that flushing
645 	 * in latency-sensitive paths is as cheap as possible.
646 	 */
647 	__mem_cgroup_flush_stats(root_mem_cgroup, true);
648 	queue_delayed_work(system_unbound_wq, &stats_flush_dwork, FLUSH_TIME);
649 }
650 
memcg_page_state(struct mem_cgroup * memcg,int idx)651 unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
652 {
653 	long x;
654 	int i = memcg_stats_index(idx);
655 
656 	if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx))
657 		return 0;
658 
659 	x = READ_ONCE(memcg->vmstats->state[i]);
660 #ifdef CONFIG_SMP
661 	if (x < 0)
662 		x = 0;
663 #endif
664 	return x;
665 }
666 
667 static int memcg_page_state_unit(int item);
668 
669 /*
670  * Normalize the value passed into memcg_rstat_updated() to be in pages. Round
671  * up non-zero sub-page updates to 1 page as zero page updates are ignored.
672  */
memcg_state_val_in_pages(int idx,int val)673 static int memcg_state_val_in_pages(int idx, int val)
674 {
675 	int unit = memcg_page_state_unit(idx);
676 
677 	if (!val || unit == PAGE_SIZE)
678 		return val;
679 	else
680 		return max(val * unit / PAGE_SIZE, 1UL);
681 }
682 
683 /**
684  * mod_memcg_state - update cgroup memory statistics
685  * @memcg: the memory cgroup
686  * @idx: the stat item - can be enum memcg_stat_item or enum node_stat_item
687  * @val: delta to add to the counter, can be negative
688  */
mod_memcg_state(struct mem_cgroup * memcg,enum memcg_stat_item idx,int val)689 void mod_memcg_state(struct mem_cgroup *memcg, enum memcg_stat_item idx,
690 		       int val)
691 {
692 	int i = memcg_stats_index(idx);
693 	int cpu;
694 
695 	if (mem_cgroup_disabled())
696 		return;
697 
698 	if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx))
699 		return;
700 
701 	cpu = get_cpu();
702 
703 	this_cpu_add(memcg->vmstats_percpu->state[i], val);
704 	val = memcg_state_val_in_pages(idx, val);
705 	memcg_rstat_updated(memcg, val, cpu);
706 	trace_mod_memcg_state(memcg, idx, val);
707 
708 	put_cpu();
709 }
710 
711 #ifdef CONFIG_MEMCG_V1
712 /* idx can be of type enum memcg_stat_item or node_stat_item. */
memcg_page_state_local(struct mem_cgroup * memcg,int idx)713 unsigned long memcg_page_state_local(struct mem_cgroup *memcg, int idx)
714 {
715 	long x;
716 	int i = memcg_stats_index(idx);
717 
718 	if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx))
719 		return 0;
720 
721 	x = READ_ONCE(memcg->vmstats->state_local[i]);
722 #ifdef CONFIG_SMP
723 	if (x < 0)
724 		x = 0;
725 #endif
726 	return x;
727 }
728 #endif
729 
mod_memcg_lruvec_state(struct lruvec * lruvec,enum node_stat_item idx,int val)730 static void mod_memcg_lruvec_state(struct lruvec *lruvec,
731 				     enum node_stat_item idx,
732 				     int val)
733 {
734 	struct mem_cgroup_per_node *pn;
735 	struct mem_cgroup *memcg;
736 	int i = memcg_stats_index(idx);
737 	int cpu;
738 
739 	if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx))
740 		return;
741 
742 	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
743 	memcg = pn->memcg;
744 
745 	cpu = get_cpu();
746 
747 	/* Update memcg */
748 	this_cpu_add(memcg->vmstats_percpu->state[i], val);
749 
750 	/* Update lruvec */
751 	this_cpu_add(pn->lruvec_stats_percpu->state[i], val);
752 
753 	val = memcg_state_val_in_pages(idx, val);
754 	memcg_rstat_updated(memcg, val, cpu);
755 	trace_mod_memcg_lruvec_state(memcg, idx, val);
756 
757 	put_cpu();
758 }
759 
760 /**
761  * __mod_lruvec_state - update lruvec memory statistics
762  * @lruvec: the lruvec
763  * @idx: the stat item
764  * @val: delta to add to the counter, can be negative
765  *
766  * The lruvec is the intersection of the NUMA node and a cgroup. This
767  * function updates the all three counters that are affected by a
768  * change of state at this level: per-node, per-cgroup, per-lruvec.
769  */
__mod_lruvec_state(struct lruvec * lruvec,enum node_stat_item idx,int val)770 void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
771 			int val)
772 {
773 	/* Update node */
774 	__mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
775 
776 	/* Update memcg and lruvec */
777 	if (!mem_cgroup_disabled())
778 		mod_memcg_lruvec_state(lruvec, idx, val);
779 }
780 
__lruvec_stat_mod_folio(struct folio * folio,enum node_stat_item idx,int val)781 void __lruvec_stat_mod_folio(struct folio *folio, enum node_stat_item idx,
782 			     int val)
783 {
784 	struct mem_cgroup *memcg;
785 	pg_data_t *pgdat = folio_pgdat(folio);
786 	struct lruvec *lruvec;
787 
788 	rcu_read_lock();
789 	memcg = folio_memcg(folio);
790 	/* Untracked pages have no memcg, no lruvec. Update only the node */
791 	if (!memcg) {
792 		rcu_read_unlock();
793 		__mod_node_page_state(pgdat, idx, val);
794 		return;
795 	}
796 
797 	lruvec = mem_cgroup_lruvec(memcg, pgdat);
798 	__mod_lruvec_state(lruvec, idx, val);
799 	rcu_read_unlock();
800 }
801 EXPORT_SYMBOL(__lruvec_stat_mod_folio);
802 
__mod_lruvec_kmem_state(void * p,enum node_stat_item idx,int val)803 void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val)
804 {
805 	pg_data_t *pgdat = page_pgdat(virt_to_page(p));
806 	struct mem_cgroup *memcg;
807 	struct lruvec *lruvec;
808 
809 	rcu_read_lock();
810 	memcg = mem_cgroup_from_slab_obj(p);
811 
812 	/*
813 	 * Untracked pages have no memcg, no lruvec. Update only the
814 	 * node. If we reparent the slab objects to the root memcg,
815 	 * when we free the slab object, we need to update the per-memcg
816 	 * vmstats to keep it correct for the root memcg.
817 	 */
818 	if (!memcg) {
819 		__mod_node_page_state(pgdat, idx, val);
820 	} else {
821 		lruvec = mem_cgroup_lruvec(memcg, pgdat);
822 		__mod_lruvec_state(lruvec, idx, val);
823 	}
824 	rcu_read_unlock();
825 }
826 
827 /**
828  * count_memcg_events - account VM events in a cgroup
829  * @memcg: the memory cgroup
830  * @idx: the event item
831  * @count: the number of events that occurred
832  */
count_memcg_events(struct mem_cgroup * memcg,enum vm_event_item idx,unsigned long count)833 void count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
834 			  unsigned long count)
835 {
836 	int i = memcg_events_index(idx);
837 	int cpu;
838 
839 	if (mem_cgroup_disabled())
840 		return;
841 
842 	if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx))
843 		return;
844 
845 	cpu = get_cpu();
846 
847 	this_cpu_add(memcg->vmstats_percpu->events[i], count);
848 	memcg_rstat_updated(memcg, count, cpu);
849 	trace_count_memcg_events(memcg, idx, count);
850 
851 	put_cpu();
852 }
853 
memcg_events(struct mem_cgroup * memcg,int event)854 unsigned long memcg_events(struct mem_cgroup *memcg, int event)
855 {
856 	int i = memcg_events_index(event);
857 
858 	if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, event))
859 		return 0;
860 
861 	return READ_ONCE(memcg->vmstats->events[i]);
862 }
863 
864 #ifdef CONFIG_MEMCG_V1
memcg_events_local(struct mem_cgroup * memcg,int event)865 unsigned long memcg_events_local(struct mem_cgroup *memcg, int event)
866 {
867 	int i = memcg_events_index(event);
868 
869 	if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, event))
870 		return 0;
871 
872 	return READ_ONCE(memcg->vmstats->events_local[i]);
873 }
874 #endif
875 
mem_cgroup_from_task(struct task_struct * p)876 struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
877 {
878 	/*
879 	 * mm_update_next_owner() may clear mm->owner to NULL
880 	 * if it races with swapoff, page migration, etc.
881 	 * So this can be called with p == NULL.
882 	 */
883 	if (unlikely(!p))
884 		return NULL;
885 
886 	return mem_cgroup_from_css(task_css(p, memory_cgrp_id));
887 }
888 EXPORT_SYMBOL(mem_cgroup_from_task);
889 
active_memcg(void)890 static __always_inline struct mem_cgroup *active_memcg(void)
891 {
892 	if (!in_task())
893 		return this_cpu_read(int_active_memcg);
894 	else
895 		return current->active_memcg;
896 }
897 
898 /**
899  * get_mem_cgroup_from_mm: Obtain a reference on given mm_struct's memcg.
900  * @mm: mm from which memcg should be extracted. It can be NULL.
901  *
902  * Obtain a reference on mm->memcg and returns it if successful. If mm
903  * is NULL, then the memcg is chosen as follows:
904  * 1) The active memcg, if set.
905  * 2) current->mm->memcg, if available
906  * 3) root memcg
907  * If mem_cgroup is disabled, NULL is returned.
908  */
get_mem_cgroup_from_mm(struct mm_struct * mm)909 struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
910 {
911 	struct mem_cgroup *memcg;
912 
913 	if (mem_cgroup_disabled())
914 		return NULL;
915 
916 	/*
917 	 * Page cache insertions can happen without an
918 	 * actual mm context, e.g. during disk probing
919 	 * on boot, loopback IO, acct() writes etc.
920 	 *
921 	 * No need to css_get on root memcg as the reference
922 	 * counting is disabled on the root level in the
923 	 * cgroup core. See CSS_NO_REF.
924 	 */
925 	if (unlikely(!mm)) {
926 		memcg = active_memcg();
927 		if (unlikely(memcg)) {
928 			/* remote memcg must hold a ref */
929 			css_get(&memcg->css);
930 			return memcg;
931 		}
932 		mm = current->mm;
933 		if (unlikely(!mm))
934 			return root_mem_cgroup;
935 	}
936 
937 	rcu_read_lock();
938 	do {
939 		memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
940 		if (unlikely(!memcg))
941 			memcg = root_mem_cgroup;
942 	} while (!css_tryget(&memcg->css));
943 	rcu_read_unlock();
944 	return memcg;
945 }
946 EXPORT_SYMBOL(get_mem_cgroup_from_mm);
947 
948 /**
949  * get_mem_cgroup_from_current - Obtain a reference on current task's memcg.
950  */
get_mem_cgroup_from_current(void)951 struct mem_cgroup *get_mem_cgroup_from_current(void)
952 {
953 	struct mem_cgroup *memcg;
954 
955 	if (mem_cgroup_disabled())
956 		return NULL;
957 
958 again:
959 	rcu_read_lock();
960 	memcg = mem_cgroup_from_task(current);
961 	if (!css_tryget(&memcg->css)) {
962 		rcu_read_unlock();
963 		goto again;
964 	}
965 	rcu_read_unlock();
966 	return memcg;
967 }
968 
969 /**
970  * get_mem_cgroup_from_folio - Obtain a reference on a given folio's memcg.
971  * @folio: folio from which memcg should be extracted.
972  */
get_mem_cgroup_from_folio(struct folio * folio)973 struct mem_cgroup *get_mem_cgroup_from_folio(struct folio *folio)
974 {
975 	struct mem_cgroup *memcg = folio_memcg(folio);
976 
977 	if (mem_cgroup_disabled())
978 		return NULL;
979 
980 	rcu_read_lock();
981 	if (!memcg || WARN_ON_ONCE(!css_tryget(&memcg->css)))
982 		memcg = root_mem_cgroup;
983 	rcu_read_unlock();
984 	return memcg;
985 }
986 
987 /**
988  * mem_cgroup_iter - iterate over memory cgroup hierarchy
989  * @root: hierarchy root
990  * @prev: previously returned memcg, NULL on first invocation
991  * @reclaim: cookie for shared reclaim walks, NULL for full walks
992  *
993  * Returns references to children of the hierarchy below @root, or
994  * @root itself, or %NULL after a full round-trip.
995  *
996  * Caller must pass the return value in @prev on subsequent
997  * invocations for reference counting, or use mem_cgroup_iter_break()
998  * to cancel a hierarchy walk before the round-trip is complete.
999  *
1000  * Reclaimers can specify a node in @reclaim to divide up the memcgs
1001  * in the hierarchy among all concurrent reclaimers operating on the
1002  * same node.
1003  */
mem_cgroup_iter(struct mem_cgroup * root,struct mem_cgroup * prev,struct mem_cgroup_reclaim_cookie * reclaim)1004 struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
1005 				   struct mem_cgroup *prev,
1006 				   struct mem_cgroup_reclaim_cookie *reclaim)
1007 {
1008 	struct mem_cgroup_reclaim_iter *iter;
1009 	struct cgroup_subsys_state *css;
1010 	struct mem_cgroup *pos;
1011 	struct mem_cgroup *next;
1012 
1013 	if (mem_cgroup_disabled())
1014 		return NULL;
1015 
1016 	if (!root)
1017 		root = root_mem_cgroup;
1018 
1019 	rcu_read_lock();
1020 restart:
1021 	next = NULL;
1022 
1023 	if (reclaim) {
1024 		int gen;
1025 		int nid = reclaim->pgdat->node_id;
1026 
1027 		iter = &root->nodeinfo[nid]->iter;
1028 		gen = atomic_read(&iter->generation);
1029 
1030 		/*
1031 		 * On start, join the current reclaim iteration cycle.
1032 		 * Exit when a concurrent walker completes it.
1033 		 */
1034 		if (!prev)
1035 			reclaim->generation = gen;
1036 		else if (reclaim->generation != gen)
1037 			goto out_unlock;
1038 
1039 		pos = READ_ONCE(iter->position);
1040 	} else
1041 		pos = prev;
1042 
1043 	css = pos ? &pos->css : NULL;
1044 
1045 	while ((css = css_next_descendant_pre(css, &root->css))) {
1046 		/*
1047 		 * Verify the css and acquire a reference.  The root
1048 		 * is provided by the caller, so we know it's alive
1049 		 * and kicking, and don't take an extra reference.
1050 		 */
1051 		if (css == &root->css || css_tryget(css))
1052 			break;
1053 	}
1054 
1055 	next = mem_cgroup_from_css(css);
1056 
1057 	if (reclaim) {
1058 		/*
1059 		 * The position could have already been updated by a competing
1060 		 * thread, so check that the value hasn't changed since we read
1061 		 * it to avoid reclaiming from the same cgroup twice.
1062 		 */
1063 		if (cmpxchg(&iter->position, pos, next) != pos) {
1064 			if (css && css != &root->css)
1065 				css_put(css);
1066 			goto restart;
1067 		}
1068 
1069 		if (!next) {
1070 			atomic_inc(&iter->generation);
1071 
1072 			/*
1073 			 * Reclaimers share the hierarchy walk, and a
1074 			 * new one might jump in right at the end of
1075 			 * the hierarchy - make sure they see at least
1076 			 * one group and restart from the beginning.
1077 			 */
1078 			if (!prev)
1079 				goto restart;
1080 		}
1081 	}
1082 
1083 out_unlock:
1084 	rcu_read_unlock();
1085 	if (prev && prev != root)
1086 		css_put(&prev->css);
1087 
1088 	return next;
1089 }
1090 
1091 /**
1092  * mem_cgroup_iter_break - abort a hierarchy walk prematurely
1093  * @root: hierarchy root
1094  * @prev: last visited hierarchy member as returned by mem_cgroup_iter()
1095  */
mem_cgroup_iter_break(struct mem_cgroup * root,struct mem_cgroup * prev)1096 void mem_cgroup_iter_break(struct mem_cgroup *root,
1097 			   struct mem_cgroup *prev)
1098 {
1099 	if (!root)
1100 		root = root_mem_cgroup;
1101 	if (prev && prev != root)
1102 		css_put(&prev->css);
1103 }
1104 
__invalidate_reclaim_iterators(struct mem_cgroup * from,struct mem_cgroup * dead_memcg)1105 static void __invalidate_reclaim_iterators(struct mem_cgroup *from,
1106 					struct mem_cgroup *dead_memcg)
1107 {
1108 	struct mem_cgroup_reclaim_iter *iter;
1109 	struct mem_cgroup_per_node *mz;
1110 	int nid;
1111 
1112 	for_each_node(nid) {
1113 		mz = from->nodeinfo[nid];
1114 		iter = &mz->iter;
1115 		cmpxchg(&iter->position, dead_memcg, NULL);
1116 	}
1117 }
1118 
invalidate_reclaim_iterators(struct mem_cgroup * dead_memcg)1119 static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
1120 {
1121 	struct mem_cgroup *memcg = dead_memcg;
1122 	struct mem_cgroup *last;
1123 
1124 	do {
1125 		__invalidate_reclaim_iterators(memcg, dead_memcg);
1126 		last = memcg;
1127 	} while ((memcg = parent_mem_cgroup(memcg)));
1128 
1129 	/*
1130 	 * When cgroup1 non-hierarchy mode is used,
1131 	 * parent_mem_cgroup() does not walk all the way up to the
1132 	 * cgroup root (root_mem_cgroup). So we have to handle
1133 	 * dead_memcg from cgroup root separately.
1134 	 */
1135 	if (!mem_cgroup_is_root(last))
1136 		__invalidate_reclaim_iterators(root_mem_cgroup,
1137 						dead_memcg);
1138 }
1139 
1140 /**
1141  * mem_cgroup_scan_tasks - iterate over tasks of a memory cgroup hierarchy
1142  * @memcg: hierarchy root
1143  * @fn: function to call for each task
1144  * @arg: argument passed to @fn
1145  *
1146  * This function iterates over tasks attached to @memcg or to any of its
1147  * descendants and calls @fn for each task. If @fn returns a non-zero
1148  * value, the function breaks the iteration loop. Otherwise, it will iterate
1149  * over all tasks and return 0.
1150  *
1151  * This function must not be called for the root memory cgroup.
1152  */
mem_cgroup_scan_tasks(struct mem_cgroup * memcg,int (* fn)(struct task_struct *,void *),void * arg)1153 void mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
1154 			   int (*fn)(struct task_struct *, void *), void *arg)
1155 {
1156 	struct mem_cgroup *iter;
1157 	int ret = 0;
1158 
1159 	BUG_ON(mem_cgroup_is_root(memcg));
1160 
1161 	for_each_mem_cgroup_tree(iter, memcg) {
1162 		struct css_task_iter it;
1163 		struct task_struct *task;
1164 
1165 		css_task_iter_start(&iter->css, CSS_TASK_ITER_PROCS, &it);
1166 		while (!ret && (task = css_task_iter_next(&it))) {
1167 			ret = fn(task, arg);
1168 			/* Avoid potential softlockup warning */
1169 			cond_resched();
1170 		}
1171 		css_task_iter_end(&it);
1172 		if (ret) {
1173 			mem_cgroup_iter_break(memcg, iter);
1174 			break;
1175 		}
1176 	}
1177 }
1178 
1179 #ifdef CONFIG_DEBUG_VM
lruvec_memcg_debug(struct lruvec * lruvec,struct folio * folio)1180 void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio)
1181 {
1182 	struct mem_cgroup *memcg;
1183 
1184 	if (mem_cgroup_disabled())
1185 		return;
1186 
1187 	memcg = folio_memcg(folio);
1188 
1189 	if (!memcg)
1190 		VM_BUG_ON_FOLIO(!mem_cgroup_is_root(lruvec_memcg(lruvec)), folio);
1191 	else
1192 		VM_BUG_ON_FOLIO(lruvec_memcg(lruvec) != memcg, folio);
1193 }
1194 #endif
1195 
1196 /**
1197  * folio_lruvec_lock - Lock the lruvec for a folio.
1198  * @folio: Pointer to the folio.
1199  *
1200  * These functions are safe to use under any of the following conditions:
1201  * - folio locked
1202  * - folio_test_lru false
1203  * - folio frozen (refcount of 0)
1204  *
1205  * Return: The lruvec this folio is on with its lock held.
1206  */
folio_lruvec_lock(struct folio * folio)1207 struct lruvec *folio_lruvec_lock(struct folio *folio)
1208 {
1209 	struct lruvec *lruvec = folio_lruvec(folio);
1210 
1211 	spin_lock(&lruvec->lru_lock);
1212 	lruvec_memcg_debug(lruvec, folio);
1213 
1214 	return lruvec;
1215 }
1216 
1217 /**
1218  * folio_lruvec_lock_irq - Lock the lruvec for a folio.
1219  * @folio: Pointer to the folio.
1220  *
1221  * These functions are safe to use under any of the following conditions:
1222  * - folio locked
1223  * - folio_test_lru false
1224  * - folio frozen (refcount of 0)
1225  *
1226  * Return: The lruvec this folio is on with its lock held and interrupts
1227  * disabled.
1228  */
folio_lruvec_lock_irq(struct folio * folio)1229 struct lruvec *folio_lruvec_lock_irq(struct folio *folio)
1230 {
1231 	struct lruvec *lruvec = folio_lruvec(folio);
1232 
1233 	spin_lock_irq(&lruvec->lru_lock);
1234 	lruvec_memcg_debug(lruvec, folio);
1235 
1236 	return lruvec;
1237 }
1238 
1239 /**
1240  * folio_lruvec_lock_irqsave - Lock the lruvec for a folio.
1241  * @folio: Pointer to the folio.
1242  * @flags: Pointer to irqsave flags.
1243  *
1244  * These functions are safe to use under any of the following conditions:
1245  * - folio locked
1246  * - folio_test_lru false
1247  * - folio frozen (refcount of 0)
1248  *
1249  * Return: The lruvec this folio is on with its lock held and interrupts
1250  * disabled.
1251  */
folio_lruvec_lock_irqsave(struct folio * folio,unsigned long * flags)1252 struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio,
1253 		unsigned long *flags)
1254 {
1255 	struct lruvec *lruvec = folio_lruvec(folio);
1256 
1257 	spin_lock_irqsave(&lruvec->lru_lock, *flags);
1258 	lruvec_memcg_debug(lruvec, folio);
1259 
1260 	return lruvec;
1261 }
1262 
1263 /**
1264  * mem_cgroup_update_lru_size - account for adding or removing an lru page
1265  * @lruvec: mem_cgroup per zone lru vector
1266  * @lru: index of lru list the page is sitting on
1267  * @zid: zone id of the accounted pages
1268  * @nr_pages: positive when adding or negative when removing
1269  *
1270  * This function must be called under lru_lock, just before a page is added
1271  * to or just after a page is removed from an lru list.
1272  */
mem_cgroup_update_lru_size(struct lruvec * lruvec,enum lru_list lru,int zid,int nr_pages)1273 void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
1274 				int zid, int nr_pages)
1275 {
1276 	struct mem_cgroup_per_node *mz;
1277 	unsigned long *lru_size;
1278 	long size;
1279 
1280 	if (mem_cgroup_disabled())
1281 		return;
1282 
1283 	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
1284 	lru_size = &mz->lru_zone_size[zid][lru];
1285 
1286 	if (nr_pages < 0)
1287 		*lru_size += nr_pages;
1288 
1289 	size = *lru_size;
1290 	if (WARN_ONCE(size < 0,
1291 		"%s(%p, %d, %d): lru_size %ld\n",
1292 		__func__, lruvec, lru, nr_pages, size)) {
1293 		VM_BUG_ON(1);
1294 		*lru_size = 0;
1295 	}
1296 
1297 	if (nr_pages > 0)
1298 		*lru_size += nr_pages;
1299 }
1300 
1301 /**
1302  * mem_cgroup_margin - calculate chargeable space of a memory cgroup
1303  * @memcg: the memory cgroup
1304  *
1305  * Returns the maximum amount of memory @mem can be charged with, in
1306  * pages.
1307  */
mem_cgroup_margin(struct mem_cgroup * memcg)1308 static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
1309 {
1310 	unsigned long margin = 0;
1311 	unsigned long count;
1312 	unsigned long limit;
1313 
1314 	count = page_counter_read(&memcg->memory);
1315 	limit = READ_ONCE(memcg->memory.max);
1316 	if (count < limit)
1317 		margin = limit - count;
1318 
1319 	if (do_memsw_account()) {
1320 		count = page_counter_read(&memcg->memsw);
1321 		limit = READ_ONCE(memcg->memsw.max);
1322 		if (count < limit)
1323 			margin = min(margin, limit - count);
1324 		else
1325 			margin = 0;
1326 	}
1327 
1328 	return margin;
1329 }
1330 
1331 struct memory_stat {
1332 	const char *name;
1333 	unsigned int idx;
1334 };
1335 
1336 static const struct memory_stat memory_stats[] = {
1337 	{ "anon",			NR_ANON_MAPPED			},
1338 	{ "file",			NR_FILE_PAGES			},
1339 	{ "kernel",			MEMCG_KMEM			},
1340 	{ "kernel_stack",		NR_KERNEL_STACK_KB		},
1341 	{ "pagetables",			NR_PAGETABLE			},
1342 	{ "sec_pagetables",		NR_SECONDARY_PAGETABLE		},
1343 	{ "percpu",			MEMCG_PERCPU_B			},
1344 	{ "sock",			MEMCG_SOCK			},
1345 	{ "vmalloc",			MEMCG_VMALLOC			},
1346 	{ "shmem",			NR_SHMEM			},
1347 #ifdef CONFIG_ZSWAP
1348 	{ "zswap",			MEMCG_ZSWAP_B			},
1349 	{ "zswapped",			MEMCG_ZSWAPPED			},
1350 #endif
1351 	{ "file_mapped",		NR_FILE_MAPPED			},
1352 	{ "file_dirty",			NR_FILE_DIRTY			},
1353 	{ "file_writeback",		NR_WRITEBACK			},
1354 #ifdef CONFIG_SWAP
1355 	{ "swapcached",			NR_SWAPCACHE			},
1356 #endif
1357 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1358 	{ "anon_thp",			NR_ANON_THPS			},
1359 	{ "file_thp",			NR_FILE_THPS			},
1360 	{ "shmem_thp",			NR_SHMEM_THPS			},
1361 #endif
1362 	{ "inactive_anon",		NR_INACTIVE_ANON		},
1363 	{ "active_anon",		NR_ACTIVE_ANON			},
1364 	{ "inactive_file",		NR_INACTIVE_FILE		},
1365 	{ "active_file",		NR_ACTIVE_FILE			},
1366 	{ "unevictable",		NR_UNEVICTABLE			},
1367 	{ "slab_reclaimable",		NR_SLAB_RECLAIMABLE_B		},
1368 	{ "slab_unreclaimable",		NR_SLAB_UNRECLAIMABLE_B		},
1369 #ifdef CONFIG_HUGETLB_PAGE
1370 	{ "hugetlb",			NR_HUGETLB			},
1371 #endif
1372 
1373 	/* The memory events */
1374 	{ "workingset_refault_anon",	WORKINGSET_REFAULT_ANON		},
1375 	{ "workingset_refault_file",	WORKINGSET_REFAULT_FILE		},
1376 	{ "workingset_activate_anon",	WORKINGSET_ACTIVATE_ANON	},
1377 	{ "workingset_activate_file",	WORKINGSET_ACTIVATE_FILE	},
1378 	{ "workingset_restore_anon",	WORKINGSET_RESTORE_ANON		},
1379 	{ "workingset_restore_file",	WORKINGSET_RESTORE_FILE		},
1380 	{ "workingset_nodereclaim",	WORKINGSET_NODERECLAIM		},
1381 
1382 	{ "pgdemote_kswapd",		PGDEMOTE_KSWAPD		},
1383 	{ "pgdemote_direct",		PGDEMOTE_DIRECT		},
1384 	{ "pgdemote_khugepaged",	PGDEMOTE_KHUGEPAGED	},
1385 	{ "pgdemote_proactive",		PGDEMOTE_PROACTIVE	},
1386 #ifdef CONFIG_NUMA_BALANCING
1387 	{ "pgpromote_success",		PGPROMOTE_SUCCESS	},
1388 #endif
1389 };
1390 
1391 /* The actual unit of the state item, not the same as the output unit */
memcg_page_state_unit(int item)1392 static int memcg_page_state_unit(int item)
1393 {
1394 	switch (item) {
1395 	case MEMCG_PERCPU_B:
1396 	case MEMCG_ZSWAP_B:
1397 	case NR_SLAB_RECLAIMABLE_B:
1398 	case NR_SLAB_UNRECLAIMABLE_B:
1399 		return 1;
1400 	case NR_KERNEL_STACK_KB:
1401 		return SZ_1K;
1402 	default:
1403 		return PAGE_SIZE;
1404 	}
1405 }
1406 
1407 /* Translate stat items to the correct unit for memory.stat output */
memcg_page_state_output_unit(int item)1408 static int memcg_page_state_output_unit(int item)
1409 {
1410 	/*
1411 	 * Workingset state is actually in pages, but we export it to userspace
1412 	 * as a scalar count of events, so special case it here.
1413 	 *
1414 	 * Demotion and promotion activities are exported in pages, consistent
1415 	 * with their global counterparts.
1416 	 */
1417 	switch (item) {
1418 	case WORKINGSET_REFAULT_ANON:
1419 	case WORKINGSET_REFAULT_FILE:
1420 	case WORKINGSET_ACTIVATE_ANON:
1421 	case WORKINGSET_ACTIVATE_FILE:
1422 	case WORKINGSET_RESTORE_ANON:
1423 	case WORKINGSET_RESTORE_FILE:
1424 	case WORKINGSET_NODERECLAIM:
1425 	case PGDEMOTE_KSWAPD:
1426 	case PGDEMOTE_DIRECT:
1427 	case PGDEMOTE_KHUGEPAGED:
1428 	case PGDEMOTE_PROACTIVE:
1429 #ifdef CONFIG_NUMA_BALANCING
1430 	case PGPROMOTE_SUCCESS:
1431 #endif
1432 		return 1;
1433 	default:
1434 		return memcg_page_state_unit(item);
1435 	}
1436 }
1437 
memcg_page_state_output(struct mem_cgroup * memcg,int item)1438 unsigned long memcg_page_state_output(struct mem_cgroup *memcg, int item)
1439 {
1440 	return memcg_page_state(memcg, item) *
1441 		memcg_page_state_output_unit(item);
1442 }
1443 
1444 #ifdef CONFIG_MEMCG_V1
memcg_page_state_local_output(struct mem_cgroup * memcg,int item)1445 unsigned long memcg_page_state_local_output(struct mem_cgroup *memcg, int item)
1446 {
1447 	return memcg_page_state_local(memcg, item) *
1448 		memcg_page_state_output_unit(item);
1449 }
1450 #endif
1451 
1452 #ifdef CONFIG_HUGETLB_PAGE
memcg_accounts_hugetlb(void)1453 static bool memcg_accounts_hugetlb(void)
1454 {
1455 	return cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_HUGETLB_ACCOUNTING;
1456 }
1457 #else /* CONFIG_HUGETLB_PAGE */
memcg_accounts_hugetlb(void)1458 static bool memcg_accounts_hugetlb(void)
1459 {
1460 	return false;
1461 }
1462 #endif /* CONFIG_HUGETLB_PAGE */
1463 
memcg_stat_format(struct mem_cgroup * memcg,struct seq_buf * s)1464 static void memcg_stat_format(struct mem_cgroup *memcg, struct seq_buf *s)
1465 {
1466 	int i;
1467 
1468 	/*
1469 	 * Provide statistics on the state of the memory subsystem as
1470 	 * well as cumulative event counters that show past behavior.
1471 	 *
1472 	 * This list is ordered following a combination of these gradients:
1473 	 * 1) generic big picture -> specifics and details
1474 	 * 2) reflecting userspace activity -> reflecting kernel heuristics
1475 	 *
1476 	 * Current memory state:
1477 	 */
1478 	mem_cgroup_flush_stats(memcg);
1479 
1480 	for (i = 0; i < ARRAY_SIZE(memory_stats); i++) {
1481 		u64 size;
1482 
1483 #ifdef CONFIG_HUGETLB_PAGE
1484 		if (unlikely(memory_stats[i].idx == NR_HUGETLB) &&
1485 			!memcg_accounts_hugetlb())
1486 			continue;
1487 #endif
1488 		size = memcg_page_state_output(memcg, memory_stats[i].idx);
1489 		seq_buf_printf(s, "%s %llu\n", memory_stats[i].name, size);
1490 
1491 		if (unlikely(memory_stats[i].idx == NR_SLAB_UNRECLAIMABLE_B)) {
1492 			size += memcg_page_state_output(memcg,
1493 							NR_SLAB_RECLAIMABLE_B);
1494 			seq_buf_printf(s, "slab %llu\n", size);
1495 		}
1496 	}
1497 
1498 	/* Accumulated memory events */
1499 	seq_buf_printf(s, "pgscan %lu\n",
1500 		       memcg_events(memcg, PGSCAN_KSWAPD) +
1501 		       memcg_events(memcg, PGSCAN_DIRECT) +
1502 		       memcg_events(memcg, PGSCAN_PROACTIVE) +
1503 		       memcg_events(memcg, PGSCAN_KHUGEPAGED));
1504 	seq_buf_printf(s, "pgsteal %lu\n",
1505 		       memcg_events(memcg, PGSTEAL_KSWAPD) +
1506 		       memcg_events(memcg, PGSTEAL_DIRECT) +
1507 		       memcg_events(memcg, PGSTEAL_PROACTIVE) +
1508 		       memcg_events(memcg, PGSTEAL_KHUGEPAGED));
1509 
1510 	for (i = 0; i < ARRAY_SIZE(memcg_vm_event_stat); i++) {
1511 #ifdef CONFIG_MEMCG_V1
1512 		if (memcg_vm_event_stat[i] == PGPGIN ||
1513 		    memcg_vm_event_stat[i] == PGPGOUT)
1514 			continue;
1515 #endif
1516 		seq_buf_printf(s, "%s %lu\n",
1517 			       vm_event_name(memcg_vm_event_stat[i]),
1518 			       memcg_events(memcg, memcg_vm_event_stat[i]));
1519 	}
1520 }
1521 
memory_stat_format(struct mem_cgroup * memcg,struct seq_buf * s)1522 static void memory_stat_format(struct mem_cgroup *memcg, struct seq_buf *s)
1523 {
1524 	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
1525 		memcg_stat_format(memcg, s);
1526 	else
1527 		memcg1_stat_format(memcg, s);
1528 	if (seq_buf_has_overflowed(s))
1529 		pr_warn("%s: Warning, stat buffer overflow, please report\n", __func__);
1530 }
1531 
1532 /**
1533  * mem_cgroup_print_oom_context: Print OOM information relevant to
1534  * memory controller.
1535  * @memcg: The memory cgroup that went over limit
1536  * @p: Task that is going to be killed
1537  *
1538  * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
1539  * enabled
1540  */
mem_cgroup_print_oom_context(struct mem_cgroup * memcg,struct task_struct * p)1541 void mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
1542 {
1543 	rcu_read_lock();
1544 
1545 	if (memcg) {
1546 		pr_cont(",oom_memcg=");
1547 		pr_cont_cgroup_path(memcg->css.cgroup);
1548 	} else
1549 		pr_cont(",global_oom");
1550 	if (p) {
1551 		pr_cont(",task_memcg=");
1552 		pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
1553 	}
1554 	rcu_read_unlock();
1555 }
1556 
1557 /**
1558  * mem_cgroup_print_oom_meminfo: Print OOM memory information relevant to
1559  * memory controller.
1560  * @memcg: The memory cgroup that went over limit
1561  */
mem_cgroup_print_oom_meminfo(struct mem_cgroup * memcg)1562 void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
1563 {
1564 	/* Use static buffer, for the caller is holding oom_lock. */
1565 	static char buf[SEQ_BUF_SIZE];
1566 	struct seq_buf s;
1567 	unsigned long memory_failcnt;
1568 
1569 	lockdep_assert_held(&oom_lock);
1570 
1571 	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
1572 		memory_failcnt = atomic_long_read(&memcg->memory_events[MEMCG_MAX]);
1573 	else
1574 		memory_failcnt = memcg->memory.failcnt;
1575 
1576 	pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n",
1577 		K((u64)page_counter_read(&memcg->memory)),
1578 		K((u64)READ_ONCE(memcg->memory.max)), memory_failcnt);
1579 	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
1580 		pr_info("swap: usage %llukB, limit %llukB, failcnt %lu\n",
1581 			K((u64)page_counter_read(&memcg->swap)),
1582 			K((u64)READ_ONCE(memcg->swap.max)),
1583 			atomic_long_read(&memcg->memory_events[MEMCG_SWAP_MAX]));
1584 #ifdef CONFIG_MEMCG_V1
1585 	else {
1586 		pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %lu\n",
1587 			K((u64)page_counter_read(&memcg->memsw)),
1588 			K((u64)memcg->memsw.max), memcg->memsw.failcnt);
1589 		pr_info("kmem: usage %llukB, limit %llukB, failcnt %lu\n",
1590 			K((u64)page_counter_read(&memcg->kmem)),
1591 			K((u64)memcg->kmem.max), memcg->kmem.failcnt);
1592 	}
1593 #endif
1594 
1595 	pr_info("Memory cgroup stats for ");
1596 	pr_cont_cgroup_path(memcg->css.cgroup);
1597 	pr_cont(":");
1598 	seq_buf_init(&s, buf, SEQ_BUF_SIZE);
1599 	memory_stat_format(memcg, &s);
1600 	seq_buf_do_printk(&s, KERN_INFO);
1601 }
1602 
1603 /*
1604  * Return the memory (and swap, if configured) limit for a memcg.
1605  */
mem_cgroup_get_max(struct mem_cgroup * memcg)1606 unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
1607 {
1608 	unsigned long max = READ_ONCE(memcg->memory.max);
1609 
1610 	if (do_memsw_account()) {
1611 		if (mem_cgroup_swappiness(memcg)) {
1612 			/* Calculate swap excess capacity from memsw limit */
1613 			unsigned long swap = READ_ONCE(memcg->memsw.max) - max;
1614 
1615 			max += min(swap, (unsigned long)total_swap_pages);
1616 		}
1617 	} else {
1618 		if (mem_cgroup_swappiness(memcg))
1619 			max += min(READ_ONCE(memcg->swap.max),
1620 				   (unsigned long)total_swap_pages);
1621 	}
1622 	return max;
1623 }
1624 
mem_cgroup_size(struct mem_cgroup * memcg)1625 unsigned long mem_cgroup_size(struct mem_cgroup *memcg)
1626 {
1627 	return page_counter_read(&memcg->memory);
1628 }
1629 
mem_cgroup_out_of_memory(struct mem_cgroup * memcg,gfp_t gfp_mask,int order)1630 static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
1631 				     int order)
1632 {
1633 	struct oom_control oc = {
1634 		.zonelist = NULL,
1635 		.nodemask = NULL,
1636 		.memcg = memcg,
1637 		.gfp_mask = gfp_mask,
1638 		.order = order,
1639 	};
1640 	bool ret = true;
1641 
1642 	if (mutex_lock_killable(&oom_lock))
1643 		return true;
1644 
1645 	if (mem_cgroup_margin(memcg) >= (1 << order))
1646 		goto unlock;
1647 
1648 	/*
1649 	 * A few threads which were not waiting at mutex_lock_killable() can
1650 	 * fail to bail out. Therefore, check again after holding oom_lock.
1651 	 */
1652 	ret = out_of_memory(&oc);
1653 
1654 unlock:
1655 	mutex_unlock(&oom_lock);
1656 	return ret;
1657 }
1658 
1659 /*
1660  * Returns true if successfully killed one or more processes. Though in some
1661  * corner cases it can return true even without killing any process.
1662  */
mem_cgroup_oom(struct mem_cgroup * memcg,gfp_t mask,int order)1663 static bool mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
1664 {
1665 	bool locked, ret;
1666 
1667 	if (order > PAGE_ALLOC_COSTLY_ORDER)
1668 		return false;
1669 
1670 	memcg_memory_event(memcg, MEMCG_OOM);
1671 
1672 	if (!memcg1_oom_prepare(memcg, &locked))
1673 		return false;
1674 
1675 	ret = mem_cgroup_out_of_memory(memcg, mask, order);
1676 
1677 	memcg1_oom_finish(memcg, locked);
1678 
1679 	return ret;
1680 }
1681 
1682 /**
1683  * mem_cgroup_get_oom_group - get a memory cgroup to clean up after OOM
1684  * @victim: task to be killed by the OOM killer
1685  * @oom_domain: memcg in case of memcg OOM, NULL in case of system-wide OOM
1686  *
1687  * Returns a pointer to a memory cgroup, which has to be cleaned up
1688  * by killing all belonging OOM-killable tasks.
1689  *
1690  * Caller has to call mem_cgroup_put() on the returned non-NULL memcg.
1691  */
mem_cgroup_get_oom_group(struct task_struct * victim,struct mem_cgroup * oom_domain)1692 struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim,
1693 					    struct mem_cgroup *oom_domain)
1694 {
1695 	struct mem_cgroup *oom_group = NULL;
1696 	struct mem_cgroup *memcg;
1697 
1698 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
1699 		return NULL;
1700 
1701 	if (!oom_domain)
1702 		oom_domain = root_mem_cgroup;
1703 
1704 	rcu_read_lock();
1705 
1706 	memcg = mem_cgroup_from_task(victim);
1707 	if (mem_cgroup_is_root(memcg))
1708 		goto out;
1709 
1710 	/*
1711 	 * If the victim task has been asynchronously moved to a different
1712 	 * memory cgroup, we might end up killing tasks outside oom_domain.
1713 	 * In this case it's better to ignore memory.group.oom.
1714 	 */
1715 	if (unlikely(!mem_cgroup_is_descendant(memcg, oom_domain)))
1716 		goto out;
1717 
1718 	/*
1719 	 * Traverse the memory cgroup hierarchy from the victim task's
1720 	 * cgroup up to the OOMing cgroup (or root) to find the
1721 	 * highest-level memory cgroup with oom.group set.
1722 	 */
1723 	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
1724 		if (READ_ONCE(memcg->oom_group))
1725 			oom_group = memcg;
1726 
1727 		if (memcg == oom_domain)
1728 			break;
1729 	}
1730 
1731 	if (oom_group)
1732 		css_get(&oom_group->css);
1733 out:
1734 	rcu_read_unlock();
1735 
1736 	return oom_group;
1737 }
1738 
mem_cgroup_print_oom_group(struct mem_cgroup * memcg)1739 void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
1740 {
1741 	pr_info("Tasks in ");
1742 	pr_cont_cgroup_path(memcg->css.cgroup);
1743 	pr_cont(" are going to be killed due to memory.oom.group set\n");
1744 }
1745 
1746 /*
1747  * The value of NR_MEMCG_STOCK is selected to keep the cached memcgs and their
1748  * nr_pages in a single cacheline. This may change in future.
1749  */
1750 #define NR_MEMCG_STOCK 7
1751 #define FLUSHING_CACHED_CHARGE	0
1752 struct memcg_stock_pcp {
1753 	local_trylock_t lock;
1754 	uint8_t nr_pages[NR_MEMCG_STOCK];
1755 	struct mem_cgroup *cached[NR_MEMCG_STOCK];
1756 
1757 	struct work_struct work;
1758 	unsigned long flags;
1759 };
1760 
1761 static DEFINE_PER_CPU_ALIGNED(struct memcg_stock_pcp, memcg_stock) = {
1762 	.lock = INIT_LOCAL_TRYLOCK(lock),
1763 };
1764 
1765 struct obj_stock_pcp {
1766 	local_trylock_t lock;
1767 	unsigned int nr_bytes;
1768 	struct obj_cgroup *cached_objcg;
1769 	struct pglist_data *cached_pgdat;
1770 	int nr_slab_reclaimable_b;
1771 	int nr_slab_unreclaimable_b;
1772 
1773 	struct work_struct work;
1774 	unsigned long flags;
1775 };
1776 
1777 static DEFINE_PER_CPU_ALIGNED(struct obj_stock_pcp, obj_stock) = {
1778 	.lock = INIT_LOCAL_TRYLOCK(lock),
1779 };
1780 
1781 static DEFINE_MUTEX(percpu_charge_mutex);
1782 
1783 static void drain_obj_stock(struct obj_stock_pcp *stock);
1784 static bool obj_stock_flush_required(struct obj_stock_pcp *stock,
1785 				     struct mem_cgroup *root_memcg);
1786 
1787 /**
1788  * consume_stock: Try to consume stocked charge on this cpu.
1789  * @memcg: memcg to consume from.
1790  * @nr_pages: how many pages to charge.
1791  *
1792  * Consume the cached charge if enough nr_pages are present otherwise return
1793  * failure. Also return failure for charge request larger than
1794  * MEMCG_CHARGE_BATCH or if the local lock is already taken.
1795  *
1796  * returns true if successful, false otherwise.
1797  */
consume_stock(struct mem_cgroup * memcg,unsigned int nr_pages)1798 static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
1799 {
1800 	struct memcg_stock_pcp *stock;
1801 	uint8_t stock_pages;
1802 	bool ret = false;
1803 	int i;
1804 
1805 	if (nr_pages > MEMCG_CHARGE_BATCH ||
1806 	    !local_trylock(&memcg_stock.lock))
1807 		return ret;
1808 
1809 	stock = this_cpu_ptr(&memcg_stock);
1810 
1811 	for (i = 0; i < NR_MEMCG_STOCK; ++i) {
1812 		if (memcg != READ_ONCE(stock->cached[i]))
1813 			continue;
1814 
1815 		stock_pages = READ_ONCE(stock->nr_pages[i]);
1816 		if (stock_pages >= nr_pages) {
1817 			WRITE_ONCE(stock->nr_pages[i], stock_pages - nr_pages);
1818 			ret = true;
1819 		}
1820 		break;
1821 	}
1822 
1823 	local_unlock(&memcg_stock.lock);
1824 
1825 	return ret;
1826 }
1827 
memcg_uncharge(struct mem_cgroup * memcg,unsigned int nr_pages)1828 static void memcg_uncharge(struct mem_cgroup *memcg, unsigned int nr_pages)
1829 {
1830 	page_counter_uncharge(&memcg->memory, nr_pages);
1831 	if (do_memsw_account())
1832 		page_counter_uncharge(&memcg->memsw, nr_pages);
1833 }
1834 
1835 /*
1836  * Returns stocks cached in percpu and reset cached information.
1837  */
drain_stock(struct memcg_stock_pcp * stock,int i)1838 static void drain_stock(struct memcg_stock_pcp *stock, int i)
1839 {
1840 	struct mem_cgroup *old = READ_ONCE(stock->cached[i]);
1841 	uint8_t stock_pages;
1842 
1843 	if (!old)
1844 		return;
1845 
1846 	stock_pages = READ_ONCE(stock->nr_pages[i]);
1847 	if (stock_pages) {
1848 		memcg_uncharge(old, stock_pages);
1849 		WRITE_ONCE(stock->nr_pages[i], 0);
1850 	}
1851 
1852 	css_put(&old->css);
1853 	WRITE_ONCE(stock->cached[i], NULL);
1854 }
1855 
drain_stock_fully(struct memcg_stock_pcp * stock)1856 static void drain_stock_fully(struct memcg_stock_pcp *stock)
1857 {
1858 	int i;
1859 
1860 	for (i = 0; i < NR_MEMCG_STOCK; ++i)
1861 		drain_stock(stock, i);
1862 }
1863 
drain_local_memcg_stock(struct work_struct * dummy)1864 static void drain_local_memcg_stock(struct work_struct *dummy)
1865 {
1866 	struct memcg_stock_pcp *stock;
1867 
1868 	if (WARN_ONCE(!in_task(), "drain in non-task context"))
1869 		return;
1870 
1871 	local_lock(&memcg_stock.lock);
1872 
1873 	stock = this_cpu_ptr(&memcg_stock);
1874 	drain_stock_fully(stock);
1875 	clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
1876 
1877 	local_unlock(&memcg_stock.lock);
1878 }
1879 
drain_local_obj_stock(struct work_struct * dummy)1880 static void drain_local_obj_stock(struct work_struct *dummy)
1881 {
1882 	struct obj_stock_pcp *stock;
1883 
1884 	if (WARN_ONCE(!in_task(), "drain in non-task context"))
1885 		return;
1886 
1887 	local_lock(&obj_stock.lock);
1888 
1889 	stock = this_cpu_ptr(&obj_stock);
1890 	drain_obj_stock(stock);
1891 	clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
1892 
1893 	local_unlock(&obj_stock.lock);
1894 }
1895 
refill_stock(struct mem_cgroup * memcg,unsigned int nr_pages)1896 static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
1897 {
1898 	struct memcg_stock_pcp *stock;
1899 	struct mem_cgroup *cached;
1900 	uint8_t stock_pages;
1901 	bool success = false;
1902 	int empty_slot = -1;
1903 	int i;
1904 
1905 	/*
1906 	 * For now limit MEMCG_CHARGE_BATCH to 127 and less. In future if we
1907 	 * decide to increase it more than 127 then we will need more careful
1908 	 * handling of nr_pages[] in struct memcg_stock_pcp.
1909 	 */
1910 	BUILD_BUG_ON(MEMCG_CHARGE_BATCH > S8_MAX);
1911 
1912 	VM_WARN_ON_ONCE(mem_cgroup_is_root(memcg));
1913 
1914 	if (nr_pages > MEMCG_CHARGE_BATCH ||
1915 	    !local_trylock(&memcg_stock.lock)) {
1916 		/*
1917 		 * In case of larger than batch refill or unlikely failure to
1918 		 * lock the percpu memcg_stock.lock, uncharge memcg directly.
1919 		 */
1920 		memcg_uncharge(memcg, nr_pages);
1921 		return;
1922 	}
1923 
1924 	stock = this_cpu_ptr(&memcg_stock);
1925 	for (i = 0; i < NR_MEMCG_STOCK; ++i) {
1926 		cached = READ_ONCE(stock->cached[i]);
1927 		if (!cached && empty_slot == -1)
1928 			empty_slot = i;
1929 		if (memcg == READ_ONCE(stock->cached[i])) {
1930 			stock_pages = READ_ONCE(stock->nr_pages[i]) + nr_pages;
1931 			WRITE_ONCE(stock->nr_pages[i], stock_pages);
1932 			if (stock_pages > MEMCG_CHARGE_BATCH)
1933 				drain_stock(stock, i);
1934 			success = true;
1935 			break;
1936 		}
1937 	}
1938 
1939 	if (!success) {
1940 		i = empty_slot;
1941 		if (i == -1) {
1942 			i = get_random_u32_below(NR_MEMCG_STOCK);
1943 			drain_stock(stock, i);
1944 		}
1945 		css_get(&memcg->css);
1946 		WRITE_ONCE(stock->cached[i], memcg);
1947 		WRITE_ONCE(stock->nr_pages[i], nr_pages);
1948 	}
1949 
1950 	local_unlock(&memcg_stock.lock);
1951 }
1952 
is_memcg_drain_needed(struct memcg_stock_pcp * stock,struct mem_cgroup * root_memcg)1953 static bool is_memcg_drain_needed(struct memcg_stock_pcp *stock,
1954 				  struct mem_cgroup *root_memcg)
1955 {
1956 	struct mem_cgroup *memcg;
1957 	bool flush = false;
1958 	int i;
1959 
1960 	rcu_read_lock();
1961 	for (i = 0; i < NR_MEMCG_STOCK; ++i) {
1962 		memcg = READ_ONCE(stock->cached[i]);
1963 		if (!memcg)
1964 			continue;
1965 
1966 		if (READ_ONCE(stock->nr_pages[i]) &&
1967 		    mem_cgroup_is_descendant(memcg, root_memcg)) {
1968 			flush = true;
1969 			break;
1970 		}
1971 	}
1972 	rcu_read_unlock();
1973 	return flush;
1974 }
1975 
1976 /*
1977  * Drains all per-CPU charge caches for given root_memcg resp. subtree
1978  * of the hierarchy under it.
1979  */
drain_all_stock(struct mem_cgroup * root_memcg)1980 void drain_all_stock(struct mem_cgroup *root_memcg)
1981 {
1982 	int cpu, curcpu;
1983 
1984 	/* If someone's already draining, avoid adding running more workers. */
1985 	if (!mutex_trylock(&percpu_charge_mutex))
1986 		return;
1987 	/*
1988 	 * Notify other cpus that system-wide "drain" is running
1989 	 * We do not care about races with the cpu hotplug because cpu down
1990 	 * as well as workers from this path always operate on the local
1991 	 * per-cpu data. CPU up doesn't touch memcg_stock at all.
1992 	 */
1993 	migrate_disable();
1994 	curcpu = smp_processor_id();
1995 	for_each_online_cpu(cpu) {
1996 		struct memcg_stock_pcp *memcg_st = &per_cpu(memcg_stock, cpu);
1997 		struct obj_stock_pcp *obj_st = &per_cpu(obj_stock, cpu);
1998 
1999 		if (!test_bit(FLUSHING_CACHED_CHARGE, &memcg_st->flags) &&
2000 		    is_memcg_drain_needed(memcg_st, root_memcg) &&
2001 		    !test_and_set_bit(FLUSHING_CACHED_CHARGE,
2002 				      &memcg_st->flags)) {
2003 			if (cpu == curcpu)
2004 				drain_local_memcg_stock(&memcg_st->work);
2005 			else if (!cpu_is_isolated(cpu))
2006 				schedule_work_on(cpu, &memcg_st->work);
2007 		}
2008 
2009 		if (!test_bit(FLUSHING_CACHED_CHARGE, &obj_st->flags) &&
2010 		    obj_stock_flush_required(obj_st, root_memcg) &&
2011 		    !test_and_set_bit(FLUSHING_CACHED_CHARGE,
2012 				      &obj_st->flags)) {
2013 			if (cpu == curcpu)
2014 				drain_local_obj_stock(&obj_st->work);
2015 			else if (!cpu_is_isolated(cpu))
2016 				schedule_work_on(cpu, &obj_st->work);
2017 		}
2018 	}
2019 	migrate_enable();
2020 	mutex_unlock(&percpu_charge_mutex);
2021 }
2022 
memcg_hotplug_cpu_dead(unsigned int cpu)2023 static int memcg_hotplug_cpu_dead(unsigned int cpu)
2024 {
2025 	/* no need for the local lock */
2026 	drain_obj_stock(&per_cpu(obj_stock, cpu));
2027 	drain_stock_fully(&per_cpu(memcg_stock, cpu));
2028 
2029 	return 0;
2030 }
2031 
reclaim_high(struct mem_cgroup * memcg,unsigned int nr_pages,gfp_t gfp_mask)2032 static unsigned long reclaim_high(struct mem_cgroup *memcg,
2033 				  unsigned int nr_pages,
2034 				  gfp_t gfp_mask)
2035 {
2036 	unsigned long nr_reclaimed = 0;
2037 
2038 	do {
2039 		unsigned long pflags;
2040 
2041 		if (page_counter_read(&memcg->memory) <=
2042 		    READ_ONCE(memcg->memory.high))
2043 			continue;
2044 
2045 		memcg_memory_event(memcg, MEMCG_HIGH);
2046 
2047 		psi_memstall_enter(&pflags);
2048 		nr_reclaimed += try_to_free_mem_cgroup_pages(memcg, nr_pages,
2049 							gfp_mask,
2050 							MEMCG_RECLAIM_MAY_SWAP,
2051 							NULL);
2052 		psi_memstall_leave(&pflags);
2053 	} while ((memcg = parent_mem_cgroup(memcg)) &&
2054 		 !mem_cgroup_is_root(memcg));
2055 
2056 	return nr_reclaimed;
2057 }
2058 
high_work_func(struct work_struct * work)2059 static void high_work_func(struct work_struct *work)
2060 {
2061 	struct mem_cgroup *memcg;
2062 
2063 	memcg = container_of(work, struct mem_cgroup, high_work);
2064 	reclaim_high(memcg, MEMCG_CHARGE_BATCH, GFP_KERNEL);
2065 }
2066 
2067 /*
2068  * Clamp the maximum sleep time per allocation batch to 2 seconds. This is
2069  * enough to still cause a significant slowdown in most cases, while still
2070  * allowing diagnostics and tracing to proceed without becoming stuck.
2071  */
2072 #define MEMCG_MAX_HIGH_DELAY_JIFFIES (2UL*HZ)
2073 
2074 /*
2075  * When calculating the delay, we use these either side of the exponentiation to
2076  * maintain precision and scale to a reasonable number of jiffies (see the table
2077  * below.
2078  *
2079  * - MEMCG_DELAY_PRECISION_SHIFT: Extra precision bits while translating the
2080  *   overage ratio to a delay.
2081  * - MEMCG_DELAY_SCALING_SHIFT: The number of bits to scale down the
2082  *   proposed penalty in order to reduce to a reasonable number of jiffies, and
2083  *   to produce a reasonable delay curve.
2084  *
2085  * MEMCG_DELAY_SCALING_SHIFT just happens to be a number that produces a
2086  * reasonable delay curve compared to precision-adjusted overage, not
2087  * penalising heavily at first, but still making sure that growth beyond the
2088  * limit penalises misbehaviour cgroups by slowing them down exponentially. For
2089  * example, with a high of 100 megabytes:
2090  *
2091  *  +-------+------------------------+
2092  *  | usage | time to allocate in ms |
2093  *  +-------+------------------------+
2094  *  | 100M  |                      0 |
2095  *  | 101M  |                      6 |
2096  *  | 102M  |                     25 |
2097  *  | 103M  |                     57 |
2098  *  | 104M  |                    102 |
2099  *  | 105M  |                    159 |
2100  *  | 106M  |                    230 |
2101  *  | 107M  |                    313 |
2102  *  | 108M  |                    409 |
2103  *  | 109M  |                    518 |
2104  *  | 110M  |                    639 |
2105  *  | 111M  |                    774 |
2106  *  | 112M  |                    921 |
2107  *  | 113M  |                   1081 |
2108  *  | 114M  |                   1254 |
2109  *  | 115M  |                   1439 |
2110  *  | 116M  |                   1638 |
2111  *  | 117M  |                   1849 |
2112  *  | 118M  |                   2000 |
2113  *  | 119M  |                   2000 |
2114  *  | 120M  |                   2000 |
2115  *  +-------+------------------------+
2116  */
2117  #define MEMCG_DELAY_PRECISION_SHIFT 20
2118  #define MEMCG_DELAY_SCALING_SHIFT 14
2119 
calculate_overage(unsigned long usage,unsigned long high)2120 static u64 calculate_overage(unsigned long usage, unsigned long high)
2121 {
2122 	u64 overage;
2123 
2124 	if (usage <= high)
2125 		return 0;
2126 
2127 	/*
2128 	 * Prevent division by 0 in overage calculation by acting as if
2129 	 * it was a threshold of 1 page
2130 	 */
2131 	high = max(high, 1UL);
2132 
2133 	overage = usage - high;
2134 	overage <<= MEMCG_DELAY_PRECISION_SHIFT;
2135 	return div64_u64(overage, high);
2136 }
2137 
mem_find_max_overage(struct mem_cgroup * memcg)2138 static u64 mem_find_max_overage(struct mem_cgroup *memcg)
2139 {
2140 	u64 overage, max_overage = 0;
2141 
2142 	do {
2143 		overage = calculate_overage(page_counter_read(&memcg->memory),
2144 					    READ_ONCE(memcg->memory.high));
2145 		max_overage = max(overage, max_overage);
2146 	} while ((memcg = parent_mem_cgroup(memcg)) &&
2147 		 !mem_cgroup_is_root(memcg));
2148 
2149 	return max_overage;
2150 }
2151 
swap_find_max_overage(struct mem_cgroup * memcg)2152 static u64 swap_find_max_overage(struct mem_cgroup *memcg)
2153 {
2154 	u64 overage, max_overage = 0;
2155 
2156 	do {
2157 		overage = calculate_overage(page_counter_read(&memcg->swap),
2158 					    READ_ONCE(memcg->swap.high));
2159 		if (overage)
2160 			memcg_memory_event(memcg, MEMCG_SWAP_HIGH);
2161 		max_overage = max(overage, max_overage);
2162 	} while ((memcg = parent_mem_cgroup(memcg)) &&
2163 		 !mem_cgroup_is_root(memcg));
2164 
2165 	return max_overage;
2166 }
2167 
2168 /*
2169  * Get the number of jiffies that we should penalise a mischievous cgroup which
2170  * is exceeding its memory.high by checking both it and its ancestors.
2171  */
calculate_high_delay(struct mem_cgroup * memcg,unsigned int nr_pages,u64 max_overage)2172 static unsigned long calculate_high_delay(struct mem_cgroup *memcg,
2173 					  unsigned int nr_pages,
2174 					  u64 max_overage)
2175 {
2176 	unsigned long penalty_jiffies;
2177 
2178 	if (!max_overage)
2179 		return 0;
2180 
2181 	/*
2182 	 * We use overage compared to memory.high to calculate the number of
2183 	 * jiffies to sleep (penalty_jiffies). Ideally this value should be
2184 	 * fairly lenient on small overages, and increasingly harsh when the
2185 	 * memcg in question makes it clear that it has no intention of stopping
2186 	 * its crazy behaviour, so we exponentially increase the delay based on
2187 	 * overage amount.
2188 	 */
2189 	penalty_jiffies = max_overage * max_overage * HZ;
2190 	penalty_jiffies >>= MEMCG_DELAY_PRECISION_SHIFT;
2191 	penalty_jiffies >>= MEMCG_DELAY_SCALING_SHIFT;
2192 
2193 	/*
2194 	 * Factor in the task's own contribution to the overage, such that four
2195 	 * N-sized allocations are throttled approximately the same as one
2196 	 * 4N-sized allocation.
2197 	 *
2198 	 * MEMCG_CHARGE_BATCH pages is nominal, so work out how much smaller or
2199 	 * larger the current charge patch is than that.
2200 	 */
2201 	return penalty_jiffies * nr_pages / MEMCG_CHARGE_BATCH;
2202 }
2203 
2204 /*
2205  * Reclaims memory over the high limit. Called directly from
2206  * try_charge() (context permitting), as well as from the userland
2207  * return path where reclaim is always able to block.
2208  */
mem_cgroup_handle_over_high(gfp_t gfp_mask)2209 void mem_cgroup_handle_over_high(gfp_t gfp_mask)
2210 {
2211 	unsigned long penalty_jiffies;
2212 	unsigned long pflags;
2213 	unsigned long nr_reclaimed;
2214 	unsigned int nr_pages = current->memcg_nr_pages_over_high;
2215 	int nr_retries = MAX_RECLAIM_RETRIES;
2216 	struct mem_cgroup *memcg;
2217 	bool in_retry = false;
2218 
2219 	if (likely(!nr_pages))
2220 		return;
2221 
2222 	memcg = get_mem_cgroup_from_mm(current->mm);
2223 	current->memcg_nr_pages_over_high = 0;
2224 
2225 retry_reclaim:
2226 	/*
2227 	 * Bail if the task is already exiting. Unlike memory.max,
2228 	 * memory.high enforcement isn't as strict, and there is no
2229 	 * OOM killer involved, which means the excess could already
2230 	 * be much bigger (and still growing) than it could for
2231 	 * memory.max; the dying task could get stuck in fruitless
2232 	 * reclaim for a long time, which isn't desirable.
2233 	 */
2234 	if (task_is_dying())
2235 		goto out;
2236 
2237 	/*
2238 	 * The allocating task should reclaim at least the batch size, but for
2239 	 * subsequent retries we only want to do what's necessary to prevent oom
2240 	 * or breaching resource isolation.
2241 	 *
2242 	 * This is distinct from memory.max or page allocator behaviour because
2243 	 * memory.high is currently batched, whereas memory.max and the page
2244 	 * allocator run every time an allocation is made.
2245 	 */
2246 	nr_reclaimed = reclaim_high(memcg,
2247 				    in_retry ? SWAP_CLUSTER_MAX : nr_pages,
2248 				    gfp_mask);
2249 
2250 	/*
2251 	 * memory.high is breached and reclaim is unable to keep up. Throttle
2252 	 * allocators proactively to slow down excessive growth.
2253 	 */
2254 	penalty_jiffies = calculate_high_delay(memcg, nr_pages,
2255 					       mem_find_max_overage(memcg));
2256 
2257 	penalty_jiffies += calculate_high_delay(memcg, nr_pages,
2258 						swap_find_max_overage(memcg));
2259 
2260 	/*
2261 	 * Clamp the max delay per usermode return so as to still keep the
2262 	 * application moving forwards and also permit diagnostics, albeit
2263 	 * extremely slowly.
2264 	 */
2265 	penalty_jiffies = min(penalty_jiffies, MEMCG_MAX_HIGH_DELAY_JIFFIES);
2266 
2267 	/*
2268 	 * Don't sleep if the amount of jiffies this memcg owes us is so low
2269 	 * that it's not even worth doing, in an attempt to be nice to those who
2270 	 * go only a small amount over their memory.high value and maybe haven't
2271 	 * been aggressively reclaimed enough yet.
2272 	 */
2273 	if (penalty_jiffies <= HZ / 100)
2274 		goto out;
2275 
2276 	/*
2277 	 * If reclaim is making forward progress but we're still over
2278 	 * memory.high, we want to encourage that rather than doing allocator
2279 	 * throttling.
2280 	 */
2281 	if (nr_reclaimed || nr_retries--) {
2282 		in_retry = true;
2283 		goto retry_reclaim;
2284 	}
2285 
2286 	/*
2287 	 * Reclaim didn't manage to push usage below the limit, slow
2288 	 * this allocating task down.
2289 	 *
2290 	 * If we exit early, we're guaranteed to die (since
2291 	 * schedule_timeout_killable sets TASK_KILLABLE). This means we don't
2292 	 * need to account for any ill-begotten jiffies to pay them off later.
2293 	 */
2294 	psi_memstall_enter(&pflags);
2295 	schedule_timeout_killable(penalty_jiffies);
2296 	psi_memstall_leave(&pflags);
2297 
2298 out:
2299 	css_put(&memcg->css);
2300 }
2301 
try_charge_memcg(struct mem_cgroup * memcg,gfp_t gfp_mask,unsigned int nr_pages)2302 static int try_charge_memcg(struct mem_cgroup *memcg, gfp_t gfp_mask,
2303 			    unsigned int nr_pages)
2304 {
2305 	unsigned int batch = max(MEMCG_CHARGE_BATCH, nr_pages);
2306 	int nr_retries = MAX_RECLAIM_RETRIES;
2307 	struct mem_cgroup *mem_over_limit;
2308 	struct page_counter *counter;
2309 	unsigned long nr_reclaimed;
2310 	bool passed_oom = false;
2311 	unsigned int reclaim_options = MEMCG_RECLAIM_MAY_SWAP;
2312 	bool drained = false;
2313 	bool raised_max_event = false;
2314 	unsigned long pflags;
2315 
2316 retry:
2317 	if (consume_stock(memcg, nr_pages))
2318 		return 0;
2319 
2320 	if (!gfpflags_allow_spinning(gfp_mask))
2321 		/* Avoid the refill and flush of the older stock */
2322 		batch = nr_pages;
2323 
2324 	if (!do_memsw_account() ||
2325 	    page_counter_try_charge(&memcg->memsw, batch, &counter)) {
2326 		if (page_counter_try_charge(&memcg->memory, batch, &counter))
2327 			goto done_restock;
2328 		if (do_memsw_account())
2329 			page_counter_uncharge(&memcg->memsw, batch);
2330 		mem_over_limit = mem_cgroup_from_counter(counter, memory);
2331 	} else {
2332 		mem_over_limit = mem_cgroup_from_counter(counter, memsw);
2333 		reclaim_options &= ~MEMCG_RECLAIM_MAY_SWAP;
2334 	}
2335 
2336 	if (batch > nr_pages) {
2337 		batch = nr_pages;
2338 		goto retry;
2339 	}
2340 
2341 	/*
2342 	 * Prevent unbounded recursion when reclaim operations need to
2343 	 * allocate memory. This might exceed the limits temporarily,
2344 	 * but we prefer facilitating memory reclaim and getting back
2345 	 * under the limit over triggering OOM kills in these cases.
2346 	 */
2347 	if (unlikely(current->flags & PF_MEMALLOC))
2348 		goto force;
2349 
2350 	if (unlikely(task_in_memcg_oom(current)))
2351 		goto nomem;
2352 
2353 	if (!gfpflags_allow_blocking(gfp_mask))
2354 		goto nomem;
2355 
2356 	memcg_memory_event(mem_over_limit, MEMCG_MAX);
2357 	raised_max_event = true;
2358 
2359 	psi_memstall_enter(&pflags);
2360 	nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages,
2361 						    gfp_mask, reclaim_options, NULL);
2362 	psi_memstall_leave(&pflags);
2363 
2364 	if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
2365 		goto retry;
2366 
2367 	if (!drained) {
2368 		drain_all_stock(mem_over_limit);
2369 		drained = true;
2370 		goto retry;
2371 	}
2372 
2373 	if (gfp_mask & __GFP_NORETRY)
2374 		goto nomem;
2375 	/*
2376 	 * Even though the limit is exceeded at this point, reclaim
2377 	 * may have been able to free some pages.  Retry the charge
2378 	 * before killing the task.
2379 	 *
2380 	 * Only for regular pages, though: huge pages are rather
2381 	 * unlikely to succeed so close to the limit, and we fall back
2382 	 * to regular pages anyway in case of failure.
2383 	 */
2384 	if (nr_reclaimed && nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER))
2385 		goto retry;
2386 
2387 	if (nr_retries--)
2388 		goto retry;
2389 
2390 	if (gfp_mask & __GFP_RETRY_MAYFAIL)
2391 		goto nomem;
2392 
2393 	/* Avoid endless loop for tasks bypassed by the oom killer */
2394 	if (passed_oom && task_is_dying())
2395 		goto nomem;
2396 
2397 	/*
2398 	 * keep retrying as long as the memcg oom killer is able to make
2399 	 * a forward progress or bypass the charge if the oom killer
2400 	 * couldn't make any progress.
2401 	 */
2402 	if (mem_cgroup_oom(mem_over_limit, gfp_mask,
2403 			   get_order(nr_pages * PAGE_SIZE))) {
2404 		passed_oom = true;
2405 		nr_retries = MAX_RECLAIM_RETRIES;
2406 		goto retry;
2407 	}
2408 nomem:
2409 	/*
2410 	 * Memcg doesn't have a dedicated reserve for atomic
2411 	 * allocations. But like the global atomic pool, we need to
2412 	 * put the burden of reclaim on regular allocation requests
2413 	 * and let these go through as privileged allocations.
2414 	 */
2415 	if (!(gfp_mask & (__GFP_NOFAIL | __GFP_HIGH)))
2416 		return -ENOMEM;
2417 force:
2418 	/*
2419 	 * If the allocation has to be enforced, don't forget to raise
2420 	 * a MEMCG_MAX event.
2421 	 */
2422 	if (!raised_max_event)
2423 		memcg_memory_event(mem_over_limit, MEMCG_MAX);
2424 
2425 	/*
2426 	 * The allocation either can't fail or will lead to more memory
2427 	 * being freed very soon.  Allow memory usage go over the limit
2428 	 * temporarily by force charging it.
2429 	 */
2430 	page_counter_charge(&memcg->memory, nr_pages);
2431 	if (do_memsw_account())
2432 		page_counter_charge(&memcg->memsw, nr_pages);
2433 
2434 	return 0;
2435 
2436 done_restock:
2437 	if (batch > nr_pages)
2438 		refill_stock(memcg, batch - nr_pages);
2439 
2440 	/*
2441 	 * If the hierarchy is above the normal consumption range, schedule
2442 	 * reclaim on returning to userland.  We can perform reclaim here
2443 	 * if __GFP_RECLAIM but let's always punt for simplicity and so that
2444 	 * GFP_KERNEL can consistently be used during reclaim.  @memcg is
2445 	 * not recorded as it most likely matches current's and won't
2446 	 * change in the meantime.  As high limit is checked again before
2447 	 * reclaim, the cost of mismatch is negligible.
2448 	 */
2449 	do {
2450 		bool mem_high, swap_high;
2451 
2452 		mem_high = page_counter_read(&memcg->memory) >
2453 			READ_ONCE(memcg->memory.high);
2454 		swap_high = page_counter_read(&memcg->swap) >
2455 			READ_ONCE(memcg->swap.high);
2456 
2457 		/* Don't bother a random interrupted task */
2458 		if (!in_task()) {
2459 			if (mem_high) {
2460 				schedule_work(&memcg->high_work);
2461 				break;
2462 			}
2463 			continue;
2464 		}
2465 
2466 		if (mem_high || swap_high) {
2467 			/*
2468 			 * The allocating tasks in this cgroup will need to do
2469 			 * reclaim or be throttled to prevent further growth
2470 			 * of the memory or swap footprints.
2471 			 *
2472 			 * Target some best-effort fairness between the tasks,
2473 			 * and distribute reclaim work and delay penalties
2474 			 * based on how much each task is actually allocating.
2475 			 */
2476 			current->memcg_nr_pages_over_high += batch;
2477 			set_notify_resume(current);
2478 			break;
2479 		}
2480 	} while ((memcg = parent_mem_cgroup(memcg)));
2481 
2482 	/*
2483 	 * Reclaim is set up above to be called from the userland
2484 	 * return path. But also attempt synchronous reclaim to avoid
2485 	 * excessive overrun while the task is still inside the
2486 	 * kernel. If this is successful, the return path will see it
2487 	 * when it rechecks the overage and simply bail out.
2488 	 */
2489 	if (current->memcg_nr_pages_over_high > MEMCG_CHARGE_BATCH &&
2490 	    !(current->flags & PF_MEMALLOC) &&
2491 	    gfpflags_allow_blocking(gfp_mask))
2492 		mem_cgroup_handle_over_high(gfp_mask);
2493 	return 0;
2494 }
2495 
try_charge(struct mem_cgroup * memcg,gfp_t gfp_mask,unsigned int nr_pages)2496 static inline int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
2497 			     unsigned int nr_pages)
2498 {
2499 	if (mem_cgroup_is_root(memcg))
2500 		return 0;
2501 
2502 	return try_charge_memcg(memcg, gfp_mask, nr_pages);
2503 }
2504 
commit_charge(struct folio * folio,struct mem_cgroup * memcg)2505 static void commit_charge(struct folio *folio, struct mem_cgroup *memcg)
2506 {
2507 	VM_BUG_ON_FOLIO(folio_memcg_charged(folio), folio);
2508 	/*
2509 	 * Any of the following ensures page's memcg stability:
2510 	 *
2511 	 * - the page lock
2512 	 * - LRU isolation
2513 	 * - exclusive reference
2514 	 */
2515 	folio->memcg_data = (unsigned long)memcg;
2516 }
2517 
2518 #ifdef CONFIG_MEMCG_NMI_SAFETY_REQUIRES_ATOMIC
account_slab_nmi_safe(struct mem_cgroup * memcg,struct pglist_data * pgdat,enum node_stat_item idx,int nr)2519 static inline void account_slab_nmi_safe(struct mem_cgroup *memcg,
2520 					 struct pglist_data *pgdat,
2521 					 enum node_stat_item idx, int nr)
2522 {
2523 	struct lruvec *lruvec;
2524 
2525 	if (likely(!in_nmi())) {
2526 		lruvec = mem_cgroup_lruvec(memcg, pgdat);
2527 		mod_memcg_lruvec_state(lruvec, idx, nr);
2528 	} else {
2529 		struct mem_cgroup_per_node *pn = memcg->nodeinfo[pgdat->node_id];
2530 
2531 		/* TODO: add to cgroup update tree once it is nmi-safe. */
2532 		if (idx == NR_SLAB_RECLAIMABLE_B)
2533 			atomic_add(nr, &pn->slab_reclaimable);
2534 		else
2535 			atomic_add(nr, &pn->slab_unreclaimable);
2536 	}
2537 }
2538 #else
account_slab_nmi_safe(struct mem_cgroup * memcg,struct pglist_data * pgdat,enum node_stat_item idx,int nr)2539 static inline void account_slab_nmi_safe(struct mem_cgroup *memcg,
2540 					 struct pglist_data *pgdat,
2541 					 enum node_stat_item idx, int nr)
2542 {
2543 	struct lruvec *lruvec;
2544 
2545 	lruvec = mem_cgroup_lruvec(memcg, pgdat);
2546 	mod_memcg_lruvec_state(lruvec, idx, nr);
2547 }
2548 #endif
2549 
mod_objcg_mlstate(struct obj_cgroup * objcg,struct pglist_data * pgdat,enum node_stat_item idx,int nr)2550 static inline void mod_objcg_mlstate(struct obj_cgroup *objcg,
2551 				       struct pglist_data *pgdat,
2552 				       enum node_stat_item idx, int nr)
2553 {
2554 	struct mem_cgroup *memcg;
2555 
2556 	rcu_read_lock();
2557 	memcg = obj_cgroup_memcg(objcg);
2558 	account_slab_nmi_safe(memcg, pgdat, idx, nr);
2559 	rcu_read_unlock();
2560 }
2561 
2562 static __always_inline
mem_cgroup_from_obj_folio(struct folio * folio,void * p)2563 struct mem_cgroup *mem_cgroup_from_obj_folio(struct folio *folio, void *p)
2564 {
2565 	/*
2566 	 * Slab objects are accounted individually, not per-page.
2567 	 * Memcg membership data for each individual object is saved in
2568 	 * slab->obj_exts.
2569 	 */
2570 	if (folio_test_slab(folio)) {
2571 		struct slabobj_ext *obj_exts;
2572 		struct slab *slab;
2573 		unsigned int off;
2574 
2575 		slab = folio_slab(folio);
2576 		obj_exts = slab_obj_exts(slab);
2577 		if (!obj_exts)
2578 			return NULL;
2579 
2580 		off = obj_to_index(slab->slab_cache, slab, p);
2581 		if (obj_exts[off].objcg)
2582 			return obj_cgroup_memcg(obj_exts[off].objcg);
2583 
2584 		return NULL;
2585 	}
2586 
2587 	/*
2588 	 * folio_memcg_check() is used here, because in theory we can encounter
2589 	 * a folio where the slab flag has been cleared already, but
2590 	 * slab->obj_exts has not been freed yet
2591 	 * folio_memcg_check() will guarantee that a proper memory
2592 	 * cgroup pointer or NULL will be returned.
2593 	 */
2594 	return folio_memcg_check(folio);
2595 }
2596 
2597 /*
2598  * Returns a pointer to the memory cgroup to which the kernel object is charged.
2599  * It is not suitable for objects allocated using vmalloc().
2600  *
2601  * A passed kernel object must be a slab object or a generic kernel page.
2602  *
2603  * The caller must ensure the memcg lifetime, e.g. by taking rcu_read_lock(),
2604  * cgroup_mutex, etc.
2605  */
mem_cgroup_from_slab_obj(void * p)2606 struct mem_cgroup *mem_cgroup_from_slab_obj(void *p)
2607 {
2608 	if (mem_cgroup_disabled())
2609 		return NULL;
2610 
2611 	return mem_cgroup_from_obj_folio(virt_to_folio(p), p);
2612 }
2613 
__get_obj_cgroup_from_memcg(struct mem_cgroup * memcg)2614 static struct obj_cgroup *__get_obj_cgroup_from_memcg(struct mem_cgroup *memcg)
2615 {
2616 	struct obj_cgroup *objcg = NULL;
2617 
2618 	for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg)) {
2619 		objcg = rcu_dereference(memcg->objcg);
2620 		if (likely(objcg && obj_cgroup_tryget(objcg)))
2621 			break;
2622 		objcg = NULL;
2623 	}
2624 	return objcg;
2625 }
2626 
current_objcg_update(void)2627 static struct obj_cgroup *current_objcg_update(void)
2628 {
2629 	struct mem_cgroup *memcg;
2630 	struct obj_cgroup *old, *objcg = NULL;
2631 
2632 	do {
2633 		/* Atomically drop the update bit. */
2634 		old = xchg(&current->objcg, NULL);
2635 		if (old) {
2636 			old = (struct obj_cgroup *)
2637 				((unsigned long)old & ~CURRENT_OBJCG_UPDATE_FLAG);
2638 			obj_cgroup_put(old);
2639 
2640 			old = NULL;
2641 		}
2642 
2643 		/* If new objcg is NULL, no reason for the second atomic update. */
2644 		if (!current->mm || (current->flags & PF_KTHREAD))
2645 			return NULL;
2646 
2647 		/*
2648 		 * Release the objcg pointer from the previous iteration,
2649 		 * if try_cmpxcg() below fails.
2650 		 */
2651 		if (unlikely(objcg)) {
2652 			obj_cgroup_put(objcg);
2653 			objcg = NULL;
2654 		}
2655 
2656 		/*
2657 		 * Obtain the new objcg pointer. The current task can be
2658 		 * asynchronously moved to another memcg and the previous
2659 		 * memcg can be offlined. So let's get the memcg pointer
2660 		 * and try get a reference to objcg under a rcu read lock.
2661 		 */
2662 
2663 		rcu_read_lock();
2664 		memcg = mem_cgroup_from_task(current);
2665 		objcg = __get_obj_cgroup_from_memcg(memcg);
2666 		rcu_read_unlock();
2667 
2668 		/*
2669 		 * Try set up a new objcg pointer atomically. If it
2670 		 * fails, it means the update flag was set concurrently, so
2671 		 * the whole procedure should be repeated.
2672 		 */
2673 	} while (!try_cmpxchg(&current->objcg, &old, objcg));
2674 
2675 	return objcg;
2676 }
2677 
current_obj_cgroup(void)2678 __always_inline struct obj_cgroup *current_obj_cgroup(void)
2679 {
2680 	struct mem_cgroup *memcg;
2681 	struct obj_cgroup *objcg;
2682 
2683 	if (IS_ENABLED(CONFIG_MEMCG_NMI_UNSAFE) && in_nmi())
2684 		return NULL;
2685 
2686 	if (in_task()) {
2687 		memcg = current->active_memcg;
2688 		if (unlikely(memcg))
2689 			goto from_memcg;
2690 
2691 		objcg = READ_ONCE(current->objcg);
2692 		if (unlikely((unsigned long)objcg & CURRENT_OBJCG_UPDATE_FLAG))
2693 			objcg = current_objcg_update();
2694 		/*
2695 		 * Objcg reference is kept by the task, so it's safe
2696 		 * to use the objcg by the current task.
2697 		 */
2698 		return objcg;
2699 	}
2700 
2701 	memcg = this_cpu_read(int_active_memcg);
2702 	if (unlikely(memcg))
2703 		goto from_memcg;
2704 
2705 	return NULL;
2706 
2707 from_memcg:
2708 	objcg = NULL;
2709 	for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg)) {
2710 		/*
2711 		 * Memcg pointer is protected by scope (see set_active_memcg())
2712 		 * and is pinning the corresponding objcg, so objcg can't go
2713 		 * away and can be used within the scope without any additional
2714 		 * protection.
2715 		 */
2716 		objcg = rcu_dereference_check(memcg->objcg, 1);
2717 		if (likely(objcg))
2718 			break;
2719 	}
2720 
2721 	return objcg;
2722 }
2723 
get_obj_cgroup_from_folio(struct folio * folio)2724 struct obj_cgroup *get_obj_cgroup_from_folio(struct folio *folio)
2725 {
2726 	struct obj_cgroup *objcg;
2727 
2728 	if (!memcg_kmem_online())
2729 		return NULL;
2730 
2731 	if (folio_memcg_kmem(folio)) {
2732 		objcg = __folio_objcg(folio);
2733 		obj_cgroup_get(objcg);
2734 	} else {
2735 		struct mem_cgroup *memcg;
2736 
2737 		rcu_read_lock();
2738 		memcg = __folio_memcg(folio);
2739 		if (memcg)
2740 			objcg = __get_obj_cgroup_from_memcg(memcg);
2741 		else
2742 			objcg = NULL;
2743 		rcu_read_unlock();
2744 	}
2745 	return objcg;
2746 }
2747 
2748 #ifdef CONFIG_MEMCG_NMI_SAFETY_REQUIRES_ATOMIC
account_kmem_nmi_safe(struct mem_cgroup * memcg,int val)2749 static inline void account_kmem_nmi_safe(struct mem_cgroup *memcg, int val)
2750 {
2751 	if (likely(!in_nmi())) {
2752 		mod_memcg_state(memcg, MEMCG_KMEM, val);
2753 	} else {
2754 		/* TODO: add to cgroup update tree once it is nmi-safe. */
2755 		atomic_add(val, &memcg->kmem_stat);
2756 	}
2757 }
2758 #else
account_kmem_nmi_safe(struct mem_cgroup * memcg,int val)2759 static inline void account_kmem_nmi_safe(struct mem_cgroup *memcg, int val)
2760 {
2761 	mod_memcg_state(memcg, MEMCG_KMEM, val);
2762 }
2763 #endif
2764 
2765 /*
2766  * obj_cgroup_uncharge_pages: uncharge a number of kernel pages from a objcg
2767  * @objcg: object cgroup to uncharge
2768  * @nr_pages: number of pages to uncharge
2769  */
obj_cgroup_uncharge_pages(struct obj_cgroup * objcg,unsigned int nr_pages)2770 static void obj_cgroup_uncharge_pages(struct obj_cgroup *objcg,
2771 				      unsigned int nr_pages)
2772 {
2773 	struct mem_cgroup *memcg;
2774 
2775 	memcg = get_mem_cgroup_from_objcg(objcg);
2776 
2777 	account_kmem_nmi_safe(memcg, -nr_pages);
2778 	memcg1_account_kmem(memcg, -nr_pages);
2779 	if (!mem_cgroup_is_root(memcg))
2780 		refill_stock(memcg, nr_pages);
2781 
2782 	css_put(&memcg->css);
2783 }
2784 
2785 /*
2786  * obj_cgroup_charge_pages: charge a number of kernel pages to a objcg
2787  * @objcg: object cgroup to charge
2788  * @gfp: reclaim mode
2789  * @nr_pages: number of pages to charge
2790  *
2791  * Returns 0 on success, an error code on failure.
2792  */
obj_cgroup_charge_pages(struct obj_cgroup * objcg,gfp_t gfp,unsigned int nr_pages)2793 static int obj_cgroup_charge_pages(struct obj_cgroup *objcg, gfp_t gfp,
2794 				   unsigned int nr_pages)
2795 {
2796 	struct mem_cgroup *memcg;
2797 	int ret;
2798 
2799 	memcg = get_mem_cgroup_from_objcg(objcg);
2800 
2801 	ret = try_charge_memcg(memcg, gfp, nr_pages);
2802 	if (ret)
2803 		goto out;
2804 
2805 	account_kmem_nmi_safe(memcg, nr_pages);
2806 	memcg1_account_kmem(memcg, nr_pages);
2807 out:
2808 	css_put(&memcg->css);
2809 
2810 	return ret;
2811 }
2812 
page_objcg(const struct page * page)2813 static struct obj_cgroup *page_objcg(const struct page *page)
2814 {
2815 	unsigned long memcg_data = page->memcg_data;
2816 
2817 	if (mem_cgroup_disabled() || !memcg_data)
2818 		return NULL;
2819 
2820 	VM_BUG_ON_PAGE((memcg_data & OBJEXTS_FLAGS_MASK) != MEMCG_DATA_KMEM,
2821 			page);
2822 	return (struct obj_cgroup *)(memcg_data - MEMCG_DATA_KMEM);
2823 }
2824 
page_set_objcg(struct page * page,const struct obj_cgroup * objcg)2825 static void page_set_objcg(struct page *page, const struct obj_cgroup *objcg)
2826 {
2827 	page->memcg_data = (unsigned long)objcg | MEMCG_DATA_KMEM;
2828 }
2829 
2830 /**
2831  * __memcg_kmem_charge_page: charge a kmem page to the current memory cgroup
2832  * @page: page to charge
2833  * @gfp: reclaim mode
2834  * @order: allocation order
2835  *
2836  * Returns 0 on success, an error code on failure.
2837  */
__memcg_kmem_charge_page(struct page * page,gfp_t gfp,int order)2838 int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order)
2839 {
2840 	struct obj_cgroup *objcg;
2841 	int ret = 0;
2842 
2843 	objcg = current_obj_cgroup();
2844 	if (objcg) {
2845 		ret = obj_cgroup_charge_pages(objcg, gfp, 1 << order);
2846 		if (!ret) {
2847 			obj_cgroup_get(objcg);
2848 			page_set_objcg(page, objcg);
2849 			return 0;
2850 		}
2851 	}
2852 	return ret;
2853 }
2854 
2855 /**
2856  * __memcg_kmem_uncharge_page: uncharge a kmem page
2857  * @page: page to uncharge
2858  * @order: allocation order
2859  */
__memcg_kmem_uncharge_page(struct page * page,int order)2860 void __memcg_kmem_uncharge_page(struct page *page, int order)
2861 {
2862 	struct obj_cgroup *objcg = page_objcg(page);
2863 	unsigned int nr_pages = 1 << order;
2864 
2865 	if (!objcg)
2866 		return;
2867 
2868 	obj_cgroup_uncharge_pages(objcg, nr_pages);
2869 	page->memcg_data = 0;
2870 	obj_cgroup_put(objcg);
2871 }
2872 
__account_obj_stock(struct obj_cgroup * objcg,struct obj_stock_pcp * stock,int nr,struct pglist_data * pgdat,enum node_stat_item idx)2873 static void __account_obj_stock(struct obj_cgroup *objcg,
2874 				struct obj_stock_pcp *stock, int nr,
2875 				struct pglist_data *pgdat, enum node_stat_item idx)
2876 {
2877 	int *bytes;
2878 
2879 	/*
2880 	 * Save vmstat data in stock and skip vmstat array update unless
2881 	 * accumulating over a page of vmstat data or when pgdat changes.
2882 	 */
2883 	if (stock->cached_pgdat != pgdat) {
2884 		/* Flush the existing cached vmstat data */
2885 		struct pglist_data *oldpg = stock->cached_pgdat;
2886 
2887 		if (stock->nr_slab_reclaimable_b) {
2888 			mod_objcg_mlstate(objcg, oldpg, NR_SLAB_RECLAIMABLE_B,
2889 					  stock->nr_slab_reclaimable_b);
2890 			stock->nr_slab_reclaimable_b = 0;
2891 		}
2892 		if (stock->nr_slab_unreclaimable_b) {
2893 			mod_objcg_mlstate(objcg, oldpg, NR_SLAB_UNRECLAIMABLE_B,
2894 					  stock->nr_slab_unreclaimable_b);
2895 			stock->nr_slab_unreclaimable_b = 0;
2896 		}
2897 		stock->cached_pgdat = pgdat;
2898 	}
2899 
2900 	bytes = (idx == NR_SLAB_RECLAIMABLE_B) ? &stock->nr_slab_reclaimable_b
2901 					       : &stock->nr_slab_unreclaimable_b;
2902 	/*
2903 	 * Even for large object >= PAGE_SIZE, the vmstat data will still be
2904 	 * cached locally at least once before pushing it out.
2905 	 */
2906 	if (!*bytes) {
2907 		*bytes = nr;
2908 		nr = 0;
2909 	} else {
2910 		*bytes += nr;
2911 		if (abs(*bytes) > PAGE_SIZE) {
2912 			nr = *bytes;
2913 			*bytes = 0;
2914 		} else {
2915 			nr = 0;
2916 		}
2917 	}
2918 	if (nr)
2919 		mod_objcg_mlstate(objcg, pgdat, idx, nr);
2920 }
2921 
consume_obj_stock(struct obj_cgroup * objcg,unsigned int nr_bytes,struct pglist_data * pgdat,enum node_stat_item idx)2922 static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes,
2923 			      struct pglist_data *pgdat, enum node_stat_item idx)
2924 {
2925 	struct obj_stock_pcp *stock;
2926 	bool ret = false;
2927 
2928 	if (!local_trylock(&obj_stock.lock))
2929 		return ret;
2930 
2931 	stock = this_cpu_ptr(&obj_stock);
2932 	if (objcg == READ_ONCE(stock->cached_objcg) && stock->nr_bytes >= nr_bytes) {
2933 		stock->nr_bytes -= nr_bytes;
2934 		ret = true;
2935 
2936 		if (pgdat)
2937 			__account_obj_stock(objcg, stock, nr_bytes, pgdat, idx);
2938 	}
2939 
2940 	local_unlock(&obj_stock.lock);
2941 
2942 	return ret;
2943 }
2944 
drain_obj_stock(struct obj_stock_pcp * stock)2945 static void drain_obj_stock(struct obj_stock_pcp *stock)
2946 {
2947 	struct obj_cgroup *old = READ_ONCE(stock->cached_objcg);
2948 
2949 	if (!old)
2950 		return;
2951 
2952 	if (stock->nr_bytes) {
2953 		unsigned int nr_pages = stock->nr_bytes >> PAGE_SHIFT;
2954 		unsigned int nr_bytes = stock->nr_bytes & (PAGE_SIZE - 1);
2955 
2956 		if (nr_pages) {
2957 			struct mem_cgroup *memcg;
2958 
2959 			memcg = get_mem_cgroup_from_objcg(old);
2960 
2961 			mod_memcg_state(memcg, MEMCG_KMEM, -nr_pages);
2962 			memcg1_account_kmem(memcg, -nr_pages);
2963 			if (!mem_cgroup_is_root(memcg))
2964 				memcg_uncharge(memcg, nr_pages);
2965 
2966 			css_put(&memcg->css);
2967 		}
2968 
2969 		/*
2970 		 * The leftover is flushed to the centralized per-memcg value.
2971 		 * On the next attempt to refill obj stock it will be moved
2972 		 * to a per-cpu stock (probably, on an other CPU), see
2973 		 * refill_obj_stock().
2974 		 *
2975 		 * How often it's flushed is a trade-off between the memory
2976 		 * limit enforcement accuracy and potential CPU contention,
2977 		 * so it might be changed in the future.
2978 		 */
2979 		atomic_add(nr_bytes, &old->nr_charged_bytes);
2980 		stock->nr_bytes = 0;
2981 	}
2982 
2983 	/*
2984 	 * Flush the vmstat data in current stock
2985 	 */
2986 	if (stock->nr_slab_reclaimable_b || stock->nr_slab_unreclaimable_b) {
2987 		if (stock->nr_slab_reclaimable_b) {
2988 			mod_objcg_mlstate(old, stock->cached_pgdat,
2989 					  NR_SLAB_RECLAIMABLE_B,
2990 					  stock->nr_slab_reclaimable_b);
2991 			stock->nr_slab_reclaimable_b = 0;
2992 		}
2993 		if (stock->nr_slab_unreclaimable_b) {
2994 			mod_objcg_mlstate(old, stock->cached_pgdat,
2995 					  NR_SLAB_UNRECLAIMABLE_B,
2996 					  stock->nr_slab_unreclaimable_b);
2997 			stock->nr_slab_unreclaimable_b = 0;
2998 		}
2999 		stock->cached_pgdat = NULL;
3000 	}
3001 
3002 	WRITE_ONCE(stock->cached_objcg, NULL);
3003 	obj_cgroup_put(old);
3004 }
3005 
obj_stock_flush_required(struct obj_stock_pcp * stock,struct mem_cgroup * root_memcg)3006 static bool obj_stock_flush_required(struct obj_stock_pcp *stock,
3007 				     struct mem_cgroup *root_memcg)
3008 {
3009 	struct obj_cgroup *objcg = READ_ONCE(stock->cached_objcg);
3010 	struct mem_cgroup *memcg;
3011 	bool flush = false;
3012 
3013 	rcu_read_lock();
3014 	if (objcg) {
3015 		memcg = obj_cgroup_memcg(objcg);
3016 		if (memcg && mem_cgroup_is_descendant(memcg, root_memcg))
3017 			flush = true;
3018 	}
3019 	rcu_read_unlock();
3020 
3021 	return flush;
3022 }
3023 
refill_obj_stock(struct obj_cgroup * objcg,unsigned int nr_bytes,bool allow_uncharge,int nr_acct,struct pglist_data * pgdat,enum node_stat_item idx)3024 static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes,
3025 		bool allow_uncharge, int nr_acct, struct pglist_data *pgdat,
3026 		enum node_stat_item idx)
3027 {
3028 	struct obj_stock_pcp *stock;
3029 	unsigned int nr_pages = 0;
3030 
3031 	if (!local_trylock(&obj_stock.lock)) {
3032 		if (pgdat)
3033 			mod_objcg_mlstate(objcg, pgdat, idx, nr_bytes);
3034 		nr_pages = nr_bytes >> PAGE_SHIFT;
3035 		nr_bytes = nr_bytes & (PAGE_SIZE - 1);
3036 		atomic_add(nr_bytes, &objcg->nr_charged_bytes);
3037 		goto out;
3038 	}
3039 
3040 	stock = this_cpu_ptr(&obj_stock);
3041 	if (READ_ONCE(stock->cached_objcg) != objcg) { /* reset if necessary */
3042 		drain_obj_stock(stock);
3043 		obj_cgroup_get(objcg);
3044 		stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes)
3045 				? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0;
3046 		WRITE_ONCE(stock->cached_objcg, objcg);
3047 
3048 		allow_uncharge = true;	/* Allow uncharge when objcg changes */
3049 	}
3050 	stock->nr_bytes += nr_bytes;
3051 
3052 	if (pgdat)
3053 		__account_obj_stock(objcg, stock, nr_acct, pgdat, idx);
3054 
3055 	if (allow_uncharge && (stock->nr_bytes > PAGE_SIZE)) {
3056 		nr_pages = stock->nr_bytes >> PAGE_SHIFT;
3057 		stock->nr_bytes &= (PAGE_SIZE - 1);
3058 	}
3059 
3060 	local_unlock(&obj_stock.lock);
3061 out:
3062 	if (nr_pages)
3063 		obj_cgroup_uncharge_pages(objcg, nr_pages);
3064 }
3065 
obj_cgroup_charge_account(struct obj_cgroup * objcg,gfp_t gfp,size_t size,struct pglist_data * pgdat,enum node_stat_item idx)3066 static int obj_cgroup_charge_account(struct obj_cgroup *objcg, gfp_t gfp, size_t size,
3067 				     struct pglist_data *pgdat, enum node_stat_item idx)
3068 {
3069 	unsigned int nr_pages, nr_bytes;
3070 	int ret;
3071 
3072 	if (likely(consume_obj_stock(objcg, size, pgdat, idx)))
3073 		return 0;
3074 
3075 	/*
3076 	 * In theory, objcg->nr_charged_bytes can have enough
3077 	 * pre-charged bytes to satisfy the allocation. However,
3078 	 * flushing objcg->nr_charged_bytes requires two atomic
3079 	 * operations, and objcg->nr_charged_bytes can't be big.
3080 	 * The shared objcg->nr_charged_bytes can also become a
3081 	 * performance bottleneck if all tasks of the same memcg are
3082 	 * trying to update it. So it's better to ignore it and try
3083 	 * grab some new pages. The stock's nr_bytes will be flushed to
3084 	 * objcg->nr_charged_bytes later on when objcg changes.
3085 	 *
3086 	 * The stock's nr_bytes may contain enough pre-charged bytes
3087 	 * to allow one less page from being charged, but we can't rely
3088 	 * on the pre-charged bytes not being changed outside of
3089 	 * consume_obj_stock() or refill_obj_stock(). So ignore those
3090 	 * pre-charged bytes as well when charging pages. To avoid a
3091 	 * page uncharge right after a page charge, we set the
3092 	 * allow_uncharge flag to false when calling refill_obj_stock()
3093 	 * to temporarily allow the pre-charged bytes to exceed the page
3094 	 * size limit. The maximum reachable value of the pre-charged
3095 	 * bytes is (sizeof(object) + PAGE_SIZE - 2) if there is no data
3096 	 * race.
3097 	 */
3098 	nr_pages = size >> PAGE_SHIFT;
3099 	nr_bytes = size & (PAGE_SIZE - 1);
3100 
3101 	if (nr_bytes)
3102 		nr_pages += 1;
3103 
3104 	ret = obj_cgroup_charge_pages(objcg, gfp, nr_pages);
3105 	if (!ret && (nr_bytes || pgdat))
3106 		refill_obj_stock(objcg, nr_bytes ? PAGE_SIZE - nr_bytes : 0,
3107 					 false, size, pgdat, idx);
3108 
3109 	return ret;
3110 }
3111 
obj_cgroup_charge(struct obj_cgroup * objcg,gfp_t gfp,size_t size)3112 int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size)
3113 {
3114 	return obj_cgroup_charge_account(objcg, gfp, size, NULL, 0);
3115 }
3116 
obj_cgroup_uncharge(struct obj_cgroup * objcg,size_t size)3117 void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size)
3118 {
3119 	refill_obj_stock(objcg, size, true, 0, NULL, 0);
3120 }
3121 
obj_full_size(struct kmem_cache * s)3122 static inline size_t obj_full_size(struct kmem_cache *s)
3123 {
3124 	/*
3125 	 * For each accounted object there is an extra space which is used
3126 	 * to store obj_cgroup membership. Charge it too.
3127 	 */
3128 	return s->size + sizeof(struct obj_cgroup *);
3129 }
3130 
__memcg_slab_post_alloc_hook(struct kmem_cache * s,struct list_lru * lru,gfp_t flags,size_t size,void ** p)3131 bool __memcg_slab_post_alloc_hook(struct kmem_cache *s, struct list_lru *lru,
3132 				  gfp_t flags, size_t size, void **p)
3133 {
3134 	struct obj_cgroup *objcg;
3135 	struct slab *slab;
3136 	unsigned long off;
3137 	size_t i;
3138 
3139 	/*
3140 	 * The obtained objcg pointer is safe to use within the current scope,
3141 	 * defined by current task or set_active_memcg() pair.
3142 	 * obj_cgroup_get() is used to get a permanent reference.
3143 	 */
3144 	objcg = current_obj_cgroup();
3145 	if (!objcg)
3146 		return true;
3147 
3148 	/*
3149 	 * slab_alloc_node() avoids the NULL check, so we might be called with a
3150 	 * single NULL object. kmem_cache_alloc_bulk() aborts if it can't fill
3151 	 * the whole requested size.
3152 	 * return success as there's nothing to free back
3153 	 */
3154 	if (unlikely(*p == NULL))
3155 		return true;
3156 
3157 	flags &= gfp_allowed_mask;
3158 
3159 	if (lru) {
3160 		int ret;
3161 		struct mem_cgroup *memcg;
3162 
3163 		memcg = get_mem_cgroup_from_objcg(objcg);
3164 		ret = memcg_list_lru_alloc(memcg, lru, flags);
3165 		css_put(&memcg->css);
3166 
3167 		if (ret)
3168 			return false;
3169 	}
3170 
3171 	for (i = 0; i < size; i++) {
3172 		slab = virt_to_slab(p[i]);
3173 
3174 		if (!slab_obj_exts(slab) &&
3175 		    alloc_slab_obj_exts(slab, s, flags, false)) {
3176 			continue;
3177 		}
3178 
3179 		/*
3180 		 * if we fail and size is 1, memcg_alloc_abort_single() will
3181 		 * just free the object, which is ok as we have not assigned
3182 		 * objcg to its obj_ext yet
3183 		 *
3184 		 * for larger sizes, kmem_cache_free_bulk() will uncharge
3185 		 * any objects that were already charged and obj_ext assigned
3186 		 *
3187 		 * TODO: we could batch this until slab_pgdat(slab) changes
3188 		 * between iterations, with a more complicated undo
3189 		 */
3190 		if (obj_cgroup_charge_account(objcg, flags, obj_full_size(s),
3191 					slab_pgdat(slab), cache_vmstat_idx(s)))
3192 			return false;
3193 
3194 		off = obj_to_index(s, slab, p[i]);
3195 		obj_cgroup_get(objcg);
3196 		slab_obj_exts(slab)[off].objcg = objcg;
3197 	}
3198 
3199 	return true;
3200 }
3201 
__memcg_slab_free_hook(struct kmem_cache * s,struct slab * slab,void ** p,int objects,struct slabobj_ext * obj_exts)3202 void __memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
3203 			    void **p, int objects, struct slabobj_ext *obj_exts)
3204 {
3205 	size_t obj_size = obj_full_size(s);
3206 
3207 	for (int i = 0; i < objects; i++) {
3208 		struct obj_cgroup *objcg;
3209 		unsigned int off;
3210 
3211 		off = obj_to_index(s, slab, p[i]);
3212 		objcg = obj_exts[off].objcg;
3213 		if (!objcg)
3214 			continue;
3215 
3216 		obj_exts[off].objcg = NULL;
3217 		refill_obj_stock(objcg, obj_size, true, -obj_size,
3218 				 slab_pgdat(slab), cache_vmstat_idx(s));
3219 		obj_cgroup_put(objcg);
3220 	}
3221 }
3222 
3223 /*
3224  * The objcg is only set on the first page, so transfer it to all the
3225  * other pages.
3226  */
split_page_memcg(struct page * page,unsigned order)3227 void split_page_memcg(struct page *page, unsigned order)
3228 {
3229 	struct obj_cgroup *objcg = page_objcg(page);
3230 	unsigned int i, nr = 1 << order;
3231 
3232 	if (!objcg)
3233 		return;
3234 
3235 	for (i = 1; i < nr; i++)
3236 		page_set_objcg(&page[i], objcg);
3237 
3238 	obj_cgroup_get_many(objcg, nr - 1);
3239 }
3240 
folio_split_memcg_refs(struct folio * folio,unsigned old_order,unsigned new_order)3241 void folio_split_memcg_refs(struct folio *folio, unsigned old_order,
3242 		unsigned new_order)
3243 {
3244 	unsigned new_refs;
3245 
3246 	if (mem_cgroup_disabled() || !folio_memcg_charged(folio))
3247 		return;
3248 
3249 	new_refs = (1 << (old_order - new_order)) - 1;
3250 	css_get_many(&__folio_memcg(folio)->css, new_refs);
3251 }
3252 
mem_cgroup_usage(struct mem_cgroup * memcg,bool swap)3253 unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
3254 {
3255 	unsigned long val;
3256 
3257 	if (mem_cgroup_is_root(memcg)) {
3258 		/*
3259 		 * Approximate root's usage from global state. This isn't
3260 		 * perfect, but the root usage was always an approximation.
3261 		 */
3262 		val = global_node_page_state(NR_FILE_PAGES) +
3263 			global_node_page_state(NR_ANON_MAPPED);
3264 		if (swap)
3265 			val += total_swap_pages - get_nr_swap_pages();
3266 	} else {
3267 		if (!swap)
3268 			val = page_counter_read(&memcg->memory);
3269 		else
3270 			val = page_counter_read(&memcg->memsw);
3271 	}
3272 	return val;
3273 }
3274 
memcg_online_kmem(struct mem_cgroup * memcg)3275 static int memcg_online_kmem(struct mem_cgroup *memcg)
3276 {
3277 	struct obj_cgroup *objcg;
3278 
3279 	if (mem_cgroup_kmem_disabled())
3280 		return 0;
3281 
3282 	if (unlikely(mem_cgroup_is_root(memcg)))
3283 		return 0;
3284 
3285 	objcg = obj_cgroup_alloc();
3286 	if (!objcg)
3287 		return -ENOMEM;
3288 
3289 	objcg->memcg = memcg;
3290 	rcu_assign_pointer(memcg->objcg, objcg);
3291 	obj_cgroup_get(objcg);
3292 	memcg->orig_objcg = objcg;
3293 
3294 	static_branch_enable(&memcg_kmem_online_key);
3295 
3296 	memcg->kmemcg_id = memcg->id.id;
3297 
3298 	return 0;
3299 }
3300 
memcg_offline_kmem(struct mem_cgroup * memcg)3301 static void memcg_offline_kmem(struct mem_cgroup *memcg)
3302 {
3303 	struct mem_cgroup *parent;
3304 
3305 	if (mem_cgroup_kmem_disabled())
3306 		return;
3307 
3308 	if (unlikely(mem_cgroup_is_root(memcg)))
3309 		return;
3310 
3311 	parent = parent_mem_cgroup(memcg);
3312 	if (!parent)
3313 		parent = root_mem_cgroup;
3314 
3315 	memcg_reparent_list_lrus(memcg, parent);
3316 
3317 	/*
3318 	 * Objcg's reparenting must be after list_lru's, make sure list_lru
3319 	 * helpers won't use parent's list_lru until child is drained.
3320 	 */
3321 	memcg_reparent_objcgs(memcg, parent);
3322 }
3323 
3324 #ifdef CONFIG_CGROUP_WRITEBACK
3325 
3326 #include <trace/events/writeback.h>
3327 
memcg_wb_domain_init(struct mem_cgroup * memcg,gfp_t gfp)3328 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
3329 {
3330 	return wb_domain_init(&memcg->cgwb_domain, gfp);
3331 }
3332 
memcg_wb_domain_exit(struct mem_cgroup * memcg)3333 static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
3334 {
3335 	wb_domain_exit(&memcg->cgwb_domain);
3336 }
3337 
memcg_wb_domain_size_changed(struct mem_cgroup * memcg)3338 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
3339 {
3340 	wb_domain_size_changed(&memcg->cgwb_domain);
3341 }
3342 
mem_cgroup_wb_domain(struct bdi_writeback * wb)3343 struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
3344 {
3345 	struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
3346 
3347 	if (!memcg->css.parent)
3348 		return NULL;
3349 
3350 	return &memcg->cgwb_domain;
3351 }
3352 
3353 /**
3354  * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg
3355  * @wb: bdi_writeback in question
3356  * @pfilepages: out parameter for number of file pages
3357  * @pheadroom: out parameter for number of allocatable pages according to memcg
3358  * @pdirty: out parameter for number of dirty pages
3359  * @pwriteback: out parameter for number of pages under writeback
3360  *
3361  * Determine the numbers of file, headroom, dirty, and writeback pages in
3362  * @wb's memcg.  File, dirty and writeback are self-explanatory.  Headroom
3363  * is a bit more involved.
3364  *
3365  * A memcg's headroom is "min(max, high) - used".  In the hierarchy, the
3366  * headroom is calculated as the lowest headroom of itself and the
3367  * ancestors.  Note that this doesn't consider the actual amount of
3368  * available memory in the system.  The caller should further cap
3369  * *@pheadroom accordingly.
3370  */
mem_cgroup_wb_stats(struct bdi_writeback * wb,unsigned long * pfilepages,unsigned long * pheadroom,unsigned long * pdirty,unsigned long * pwriteback)3371 void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
3372 			 unsigned long *pheadroom, unsigned long *pdirty,
3373 			 unsigned long *pwriteback)
3374 {
3375 	struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
3376 	struct mem_cgroup *parent;
3377 
3378 	mem_cgroup_flush_stats_ratelimited(memcg);
3379 
3380 	*pdirty = memcg_page_state(memcg, NR_FILE_DIRTY);
3381 	*pwriteback = memcg_page_state(memcg, NR_WRITEBACK);
3382 	*pfilepages = memcg_page_state(memcg, NR_INACTIVE_FILE) +
3383 			memcg_page_state(memcg, NR_ACTIVE_FILE);
3384 
3385 	*pheadroom = PAGE_COUNTER_MAX;
3386 	while ((parent = parent_mem_cgroup(memcg))) {
3387 		unsigned long ceiling = min(READ_ONCE(memcg->memory.max),
3388 					    READ_ONCE(memcg->memory.high));
3389 		unsigned long used = page_counter_read(&memcg->memory);
3390 
3391 		*pheadroom = min(*pheadroom, ceiling - min(ceiling, used));
3392 		memcg = parent;
3393 	}
3394 }
3395 
3396 /*
3397  * Foreign dirty flushing
3398  *
3399  * There's an inherent mismatch between memcg and writeback.  The former
3400  * tracks ownership per-page while the latter per-inode.  This was a
3401  * deliberate design decision because honoring per-page ownership in the
3402  * writeback path is complicated, may lead to higher CPU and IO overheads
3403  * and deemed unnecessary given that write-sharing an inode across
3404  * different cgroups isn't a common use-case.
3405  *
3406  * Combined with inode majority-writer ownership switching, this works well
3407  * enough in most cases but there are some pathological cases.  For
3408  * example, let's say there are two cgroups A and B which keep writing to
3409  * different but confined parts of the same inode.  B owns the inode and
3410  * A's memory is limited far below B's.  A's dirty ratio can rise enough to
3411  * trigger balance_dirty_pages() sleeps but B's can be low enough to avoid
3412  * triggering background writeback.  A will be slowed down without a way to
3413  * make writeback of the dirty pages happen.
3414  *
3415  * Conditions like the above can lead to a cgroup getting repeatedly and
3416  * severely throttled after making some progress after each
3417  * dirty_expire_interval while the underlying IO device is almost
3418  * completely idle.
3419  *
3420  * Solving this problem completely requires matching the ownership tracking
3421  * granularities between memcg and writeback in either direction.  However,
3422  * the more egregious behaviors can be avoided by simply remembering the
3423  * most recent foreign dirtying events and initiating remote flushes on
3424  * them when local writeback isn't enough to keep the memory clean enough.
3425  *
3426  * The following two functions implement such mechanism.  When a foreign
3427  * page - a page whose memcg and writeback ownerships don't match - is
3428  * dirtied, mem_cgroup_track_foreign_dirty() records the inode owning
3429  * bdi_writeback on the page owning memcg.  When balance_dirty_pages()
3430  * decides that the memcg needs to sleep due to high dirty ratio, it calls
3431  * mem_cgroup_flush_foreign() which queues writeback on the recorded
3432  * foreign bdi_writebacks which haven't expired.  Both the numbers of
3433  * recorded bdi_writebacks and concurrent in-flight foreign writebacks are
3434  * limited to MEMCG_CGWB_FRN_CNT.
3435  *
3436  * The mechanism only remembers IDs and doesn't hold any object references.
3437  * As being wrong occasionally doesn't matter, updates and accesses to the
3438  * records are lockless and racy.
3439  */
mem_cgroup_track_foreign_dirty_slowpath(struct folio * folio,struct bdi_writeback * wb)3440 void mem_cgroup_track_foreign_dirty_slowpath(struct folio *folio,
3441 					     struct bdi_writeback *wb)
3442 {
3443 	struct mem_cgroup *memcg = folio_memcg(folio);
3444 	struct memcg_cgwb_frn *frn;
3445 	u64 now = get_jiffies_64();
3446 	u64 oldest_at = now;
3447 	int oldest = -1;
3448 	int i;
3449 
3450 	trace_track_foreign_dirty(folio, wb);
3451 
3452 	/*
3453 	 * Pick the slot to use.  If there is already a slot for @wb, keep
3454 	 * using it.  If not replace the oldest one which isn't being
3455 	 * written out.
3456 	 */
3457 	for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) {
3458 		frn = &memcg->cgwb_frn[i];
3459 		if (frn->bdi_id == wb->bdi->id &&
3460 		    frn->memcg_id == wb->memcg_css->id)
3461 			break;
3462 		if (time_before64(frn->at, oldest_at) &&
3463 		    atomic_read(&frn->done.cnt) == 1) {
3464 			oldest = i;
3465 			oldest_at = frn->at;
3466 		}
3467 	}
3468 
3469 	if (i < MEMCG_CGWB_FRN_CNT) {
3470 		/*
3471 		 * Re-using an existing one.  Update timestamp lazily to
3472 		 * avoid making the cacheline hot.  We want them to be
3473 		 * reasonably up-to-date and significantly shorter than
3474 		 * dirty_expire_interval as that's what expires the record.
3475 		 * Use the shorter of 1s and dirty_expire_interval / 8.
3476 		 */
3477 		unsigned long update_intv =
3478 			min_t(unsigned long, HZ,
3479 			      msecs_to_jiffies(dirty_expire_interval * 10) / 8);
3480 
3481 		if (time_before64(frn->at, now - update_intv))
3482 			frn->at = now;
3483 	} else if (oldest >= 0) {
3484 		/* replace the oldest free one */
3485 		frn = &memcg->cgwb_frn[oldest];
3486 		frn->bdi_id = wb->bdi->id;
3487 		frn->memcg_id = wb->memcg_css->id;
3488 		frn->at = now;
3489 	}
3490 }
3491 
3492 /* issue foreign writeback flushes for recorded foreign dirtying events */
mem_cgroup_flush_foreign(struct bdi_writeback * wb)3493 void mem_cgroup_flush_foreign(struct bdi_writeback *wb)
3494 {
3495 	struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
3496 	unsigned long intv = msecs_to_jiffies(dirty_expire_interval * 10);
3497 	u64 now = jiffies_64;
3498 	int i;
3499 
3500 	for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) {
3501 		struct memcg_cgwb_frn *frn = &memcg->cgwb_frn[i];
3502 
3503 		/*
3504 		 * If the record is older than dirty_expire_interval,
3505 		 * writeback on it has already started.  No need to kick it
3506 		 * off again.  Also, don't start a new one if there's
3507 		 * already one in flight.
3508 		 */
3509 		if (time_after64(frn->at, now - intv) &&
3510 		    atomic_read(&frn->done.cnt) == 1) {
3511 			frn->at = 0;
3512 			trace_flush_foreign(wb, frn->bdi_id, frn->memcg_id);
3513 			cgroup_writeback_by_id(frn->bdi_id, frn->memcg_id,
3514 					       WB_REASON_FOREIGN_FLUSH,
3515 					       &frn->done);
3516 		}
3517 	}
3518 }
3519 
3520 #else	/* CONFIG_CGROUP_WRITEBACK */
3521 
memcg_wb_domain_init(struct mem_cgroup * memcg,gfp_t gfp)3522 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
3523 {
3524 	return 0;
3525 }
3526 
memcg_wb_domain_exit(struct mem_cgroup * memcg)3527 static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
3528 {
3529 }
3530 
memcg_wb_domain_size_changed(struct mem_cgroup * memcg)3531 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
3532 {
3533 }
3534 
3535 #endif	/* CONFIG_CGROUP_WRITEBACK */
3536 
3537 /*
3538  * Private memory cgroup IDR
3539  *
3540  * Swap-out records and page cache shadow entries need to store memcg
3541  * references in constrained space, so we maintain an ID space that is
3542  * limited to 16 bit (MEM_CGROUP_ID_MAX), limiting the total number of
3543  * memory-controlled cgroups to 64k.
3544  *
3545  * However, there usually are many references to the offline CSS after
3546  * the cgroup has been destroyed, such as page cache or reclaimable
3547  * slab objects, that don't need to hang on to the ID. We want to keep
3548  * those dead CSS from occupying IDs, or we might quickly exhaust the
3549  * relatively small ID space and prevent the creation of new cgroups
3550  * even when there are much fewer than 64k cgroups - possibly none.
3551  *
3552  * Maintain a private 16-bit ID space for memcg, and allow the ID to
3553  * be freed and recycled when it's no longer needed, which is usually
3554  * when the CSS is offlined.
3555  *
3556  * The only exception to that are records of swapped out tmpfs/shmem
3557  * pages that need to be attributed to live ancestors on swapin. But
3558  * those references are manageable from userspace.
3559  */
3560 
3561 #define MEM_CGROUP_ID_MAX	((1UL << MEM_CGROUP_ID_SHIFT) - 1)
3562 static DEFINE_XARRAY_ALLOC1(mem_cgroup_ids);
3563 
mem_cgroup_id_remove(struct mem_cgroup * memcg)3564 static void mem_cgroup_id_remove(struct mem_cgroup *memcg)
3565 {
3566 	if (memcg->id.id > 0) {
3567 		xa_erase(&mem_cgroup_ids, memcg->id.id);
3568 		memcg->id.id = 0;
3569 	}
3570 }
3571 
mem_cgroup_id_get_many(struct mem_cgroup * memcg,unsigned int n)3572 void __maybe_unused mem_cgroup_id_get_many(struct mem_cgroup *memcg,
3573 					   unsigned int n)
3574 {
3575 	refcount_add(n, &memcg->id.ref);
3576 }
3577 
mem_cgroup_id_put_many(struct mem_cgroup * memcg,unsigned int n)3578 static void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n)
3579 {
3580 	if (refcount_sub_and_test(n, &memcg->id.ref)) {
3581 		mem_cgroup_id_remove(memcg);
3582 
3583 		/* Memcg ID pins CSS */
3584 		css_put(&memcg->css);
3585 	}
3586 }
3587 
mem_cgroup_id_put(struct mem_cgroup * memcg)3588 static inline void mem_cgroup_id_put(struct mem_cgroup *memcg)
3589 {
3590 	mem_cgroup_id_put_many(memcg, 1);
3591 }
3592 
mem_cgroup_id_get_online(struct mem_cgroup * memcg)3593 struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg)
3594 {
3595 	while (!refcount_inc_not_zero(&memcg->id.ref)) {
3596 		/*
3597 		 * The root cgroup cannot be destroyed, so it's refcount must
3598 		 * always be >= 1.
3599 		 */
3600 		if (WARN_ON_ONCE(mem_cgroup_is_root(memcg))) {
3601 			VM_BUG_ON(1);
3602 			break;
3603 		}
3604 		memcg = parent_mem_cgroup(memcg);
3605 		if (!memcg)
3606 			memcg = root_mem_cgroup;
3607 	}
3608 	return memcg;
3609 }
3610 
3611 /**
3612  * mem_cgroup_from_id - look up a memcg from a memcg id
3613  * @id: the memcg id to look up
3614  *
3615  * Caller must hold rcu_read_lock().
3616  */
mem_cgroup_from_id(unsigned short id)3617 struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
3618 {
3619 	WARN_ON_ONCE(!rcu_read_lock_held());
3620 	return xa_load(&mem_cgroup_ids, id);
3621 }
3622 
3623 #ifdef CONFIG_SHRINKER_DEBUG
mem_cgroup_get_from_ino(unsigned long ino)3624 struct mem_cgroup *mem_cgroup_get_from_ino(unsigned long ino)
3625 {
3626 	struct cgroup *cgrp;
3627 	struct cgroup_subsys_state *css;
3628 	struct mem_cgroup *memcg;
3629 
3630 	cgrp = cgroup_get_from_id(ino);
3631 	if (IS_ERR(cgrp))
3632 		return ERR_CAST(cgrp);
3633 
3634 	css = cgroup_get_e_css(cgrp, &memory_cgrp_subsys);
3635 	if (css)
3636 		memcg = container_of(css, struct mem_cgroup, css);
3637 	else
3638 		memcg = ERR_PTR(-ENOENT);
3639 
3640 	cgroup_put(cgrp);
3641 
3642 	return memcg;
3643 }
3644 #endif
3645 
free_mem_cgroup_per_node_info(struct mem_cgroup_per_node * pn)3646 static void free_mem_cgroup_per_node_info(struct mem_cgroup_per_node *pn)
3647 {
3648 	if (!pn)
3649 		return;
3650 
3651 	free_percpu(pn->lruvec_stats_percpu);
3652 	kfree(pn->lruvec_stats);
3653 	kfree(pn);
3654 }
3655 
alloc_mem_cgroup_per_node_info(struct mem_cgroup * memcg,int node)3656 static bool alloc_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
3657 {
3658 	struct mem_cgroup_per_node *pn;
3659 
3660 	pn = kmem_cache_alloc_node(memcg_pn_cachep, GFP_KERNEL | __GFP_ZERO,
3661 				   node);
3662 	if (!pn)
3663 		return false;
3664 
3665 	pn->lruvec_stats = kzalloc_node(sizeof(struct lruvec_stats),
3666 					GFP_KERNEL_ACCOUNT, node);
3667 	if (!pn->lruvec_stats)
3668 		goto fail;
3669 
3670 	pn->lruvec_stats_percpu = alloc_percpu_gfp(struct lruvec_stats_percpu,
3671 						   GFP_KERNEL_ACCOUNT);
3672 	if (!pn->lruvec_stats_percpu)
3673 		goto fail;
3674 
3675 	lruvec_init(&pn->lruvec);
3676 	pn->memcg = memcg;
3677 
3678 	memcg->nodeinfo[node] = pn;
3679 	return true;
3680 fail:
3681 	free_mem_cgroup_per_node_info(pn);
3682 	return false;
3683 }
3684 
__mem_cgroup_free(struct mem_cgroup * memcg)3685 static void __mem_cgroup_free(struct mem_cgroup *memcg)
3686 {
3687 	int node;
3688 
3689 	obj_cgroup_put(memcg->orig_objcg);
3690 
3691 	for_each_node(node)
3692 		free_mem_cgroup_per_node_info(memcg->nodeinfo[node]);
3693 	memcg1_free_events(memcg);
3694 	kfree(memcg->vmstats);
3695 	free_percpu(memcg->vmstats_percpu);
3696 	kfree(memcg);
3697 }
3698 
mem_cgroup_free(struct mem_cgroup * memcg)3699 static void mem_cgroup_free(struct mem_cgroup *memcg)
3700 {
3701 	lru_gen_exit_memcg(memcg);
3702 	memcg_wb_domain_exit(memcg);
3703 	__mem_cgroup_free(memcg);
3704 }
3705 
mem_cgroup_alloc(struct mem_cgroup * parent)3706 static struct mem_cgroup *mem_cgroup_alloc(struct mem_cgroup *parent)
3707 {
3708 	struct memcg_vmstats_percpu *statc;
3709 	struct memcg_vmstats_percpu __percpu *pstatc_pcpu;
3710 	struct mem_cgroup *memcg;
3711 	int node, cpu;
3712 	int __maybe_unused i;
3713 	long error;
3714 
3715 	memcg = kmem_cache_zalloc(memcg_cachep, GFP_KERNEL);
3716 	if (!memcg)
3717 		return ERR_PTR(-ENOMEM);
3718 
3719 	error = xa_alloc(&mem_cgroup_ids, &memcg->id.id, NULL,
3720 			 XA_LIMIT(1, MEM_CGROUP_ID_MAX), GFP_KERNEL);
3721 	if (error)
3722 		goto fail;
3723 	error = -ENOMEM;
3724 
3725 	memcg->vmstats = kzalloc(sizeof(struct memcg_vmstats),
3726 				 GFP_KERNEL_ACCOUNT);
3727 	if (!memcg->vmstats)
3728 		goto fail;
3729 
3730 	memcg->vmstats_percpu = alloc_percpu_gfp(struct memcg_vmstats_percpu,
3731 						 GFP_KERNEL_ACCOUNT);
3732 	if (!memcg->vmstats_percpu)
3733 		goto fail;
3734 
3735 	if (!memcg1_alloc_events(memcg))
3736 		goto fail;
3737 
3738 	for_each_possible_cpu(cpu) {
3739 		if (parent)
3740 			pstatc_pcpu = parent->vmstats_percpu;
3741 		statc = per_cpu_ptr(memcg->vmstats_percpu, cpu);
3742 		statc->parent_pcpu = parent ? pstatc_pcpu : NULL;
3743 		statc->vmstats = memcg->vmstats;
3744 	}
3745 
3746 	for_each_node(node)
3747 		if (!alloc_mem_cgroup_per_node_info(memcg, node))
3748 			goto fail;
3749 
3750 	if (memcg_wb_domain_init(memcg, GFP_KERNEL))
3751 		goto fail;
3752 
3753 	INIT_WORK(&memcg->high_work, high_work_func);
3754 	vmpressure_init(&memcg->vmpressure);
3755 	INIT_LIST_HEAD(&memcg->memory_peaks);
3756 	INIT_LIST_HEAD(&memcg->swap_peaks);
3757 	spin_lock_init(&memcg->peaks_lock);
3758 	memcg->socket_pressure = jiffies;
3759 	memcg1_memcg_init(memcg);
3760 	memcg->kmemcg_id = -1;
3761 	INIT_LIST_HEAD(&memcg->objcg_list);
3762 #ifdef CONFIG_CGROUP_WRITEBACK
3763 	INIT_LIST_HEAD(&memcg->cgwb_list);
3764 	for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++)
3765 		memcg->cgwb_frn[i].done =
3766 			__WB_COMPLETION_INIT(&memcg_cgwb_frn_waitq);
3767 #endif
3768 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
3769 	spin_lock_init(&memcg->deferred_split_queue.split_queue_lock);
3770 	INIT_LIST_HEAD(&memcg->deferred_split_queue.split_queue);
3771 	memcg->deferred_split_queue.split_queue_len = 0;
3772 #endif
3773 	lru_gen_init_memcg(memcg);
3774 	return memcg;
3775 fail:
3776 	mem_cgroup_id_remove(memcg);
3777 	__mem_cgroup_free(memcg);
3778 	return ERR_PTR(error);
3779 }
3780 
3781 static struct cgroup_subsys_state * __ref
mem_cgroup_css_alloc(struct cgroup_subsys_state * parent_css)3782 mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
3783 {
3784 	struct mem_cgroup *parent = mem_cgroup_from_css(parent_css);
3785 	struct mem_cgroup *memcg, *old_memcg;
3786 	bool memcg_on_dfl = cgroup_subsys_on_dfl(memory_cgrp_subsys);
3787 
3788 	old_memcg = set_active_memcg(parent);
3789 	memcg = mem_cgroup_alloc(parent);
3790 	set_active_memcg(old_memcg);
3791 	if (IS_ERR(memcg))
3792 		return ERR_CAST(memcg);
3793 
3794 	page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX);
3795 	memcg1_soft_limit_reset(memcg);
3796 #ifdef CONFIG_ZSWAP
3797 	memcg->zswap_max = PAGE_COUNTER_MAX;
3798 	WRITE_ONCE(memcg->zswap_writeback, true);
3799 #endif
3800 	page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX);
3801 	if (parent) {
3802 		WRITE_ONCE(memcg->swappiness, mem_cgroup_swappiness(parent));
3803 
3804 		page_counter_init(&memcg->memory, &parent->memory, memcg_on_dfl);
3805 		page_counter_init(&memcg->swap, &parent->swap, false);
3806 #ifdef CONFIG_MEMCG_V1
3807 		memcg->memory.track_failcnt = !memcg_on_dfl;
3808 		WRITE_ONCE(memcg->oom_kill_disable, READ_ONCE(parent->oom_kill_disable));
3809 		page_counter_init(&memcg->kmem, &parent->kmem, false);
3810 		page_counter_init(&memcg->tcpmem, &parent->tcpmem, false);
3811 #endif
3812 	} else {
3813 		init_memcg_stats();
3814 		init_memcg_events();
3815 		page_counter_init(&memcg->memory, NULL, true);
3816 		page_counter_init(&memcg->swap, NULL, false);
3817 #ifdef CONFIG_MEMCG_V1
3818 		page_counter_init(&memcg->kmem, NULL, false);
3819 		page_counter_init(&memcg->tcpmem, NULL, false);
3820 #endif
3821 		root_mem_cgroup = memcg;
3822 		return &memcg->css;
3823 	}
3824 
3825 	if (memcg_on_dfl && !cgroup_memory_nosocket)
3826 		static_branch_inc(&memcg_sockets_enabled_key);
3827 
3828 	if (!cgroup_memory_nobpf)
3829 		static_branch_inc(&memcg_bpf_enabled_key);
3830 
3831 	return &memcg->css;
3832 }
3833 
mem_cgroup_css_online(struct cgroup_subsys_state * css)3834 static int mem_cgroup_css_online(struct cgroup_subsys_state *css)
3835 {
3836 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3837 
3838 	if (memcg_online_kmem(memcg))
3839 		goto remove_id;
3840 
3841 	/*
3842 	 * A memcg must be visible for expand_shrinker_info()
3843 	 * by the time the maps are allocated. So, we allocate maps
3844 	 * here, when for_each_mem_cgroup() can't skip it.
3845 	 */
3846 	if (alloc_shrinker_info(memcg))
3847 		goto offline_kmem;
3848 
3849 	if (unlikely(mem_cgroup_is_root(memcg)) && !mem_cgroup_disabled())
3850 		queue_delayed_work(system_unbound_wq, &stats_flush_dwork,
3851 				   FLUSH_TIME);
3852 	lru_gen_online_memcg(memcg);
3853 
3854 	/* Online state pins memcg ID, memcg ID pins CSS */
3855 	refcount_set(&memcg->id.ref, 1);
3856 	css_get(css);
3857 
3858 	/*
3859 	 * Ensure mem_cgroup_from_id() works once we're fully online.
3860 	 *
3861 	 * We could do this earlier and require callers to filter with
3862 	 * css_tryget_online(). But right now there are no users that
3863 	 * need earlier access, and the workingset code relies on the
3864 	 * cgroup tree linkage (mem_cgroup_get_nr_swap_pages()). So
3865 	 * publish it here at the end of onlining. This matches the
3866 	 * regular ID destruction during offlining.
3867 	 */
3868 	xa_store(&mem_cgroup_ids, memcg->id.id, memcg, GFP_KERNEL);
3869 
3870 	return 0;
3871 offline_kmem:
3872 	memcg_offline_kmem(memcg);
3873 remove_id:
3874 	mem_cgroup_id_remove(memcg);
3875 	return -ENOMEM;
3876 }
3877 
mem_cgroup_css_offline(struct cgroup_subsys_state * css)3878 static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
3879 {
3880 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3881 
3882 	memcg1_css_offline(memcg);
3883 
3884 	page_counter_set_min(&memcg->memory, 0);
3885 	page_counter_set_low(&memcg->memory, 0);
3886 
3887 	zswap_memcg_offline_cleanup(memcg);
3888 
3889 	memcg_offline_kmem(memcg);
3890 	reparent_shrinker_deferred(memcg);
3891 	wb_memcg_offline(memcg);
3892 	lru_gen_offline_memcg(memcg);
3893 
3894 	drain_all_stock(memcg);
3895 
3896 	mem_cgroup_id_put(memcg);
3897 }
3898 
mem_cgroup_css_released(struct cgroup_subsys_state * css)3899 static void mem_cgroup_css_released(struct cgroup_subsys_state *css)
3900 {
3901 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3902 
3903 	invalidate_reclaim_iterators(memcg);
3904 	lru_gen_release_memcg(memcg);
3905 }
3906 
mem_cgroup_css_free(struct cgroup_subsys_state * css)3907 static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
3908 {
3909 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3910 	int __maybe_unused i;
3911 
3912 #ifdef CONFIG_CGROUP_WRITEBACK
3913 	for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++)
3914 		wb_wait_for_completion(&memcg->cgwb_frn[i].done);
3915 #endif
3916 	if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
3917 		static_branch_dec(&memcg_sockets_enabled_key);
3918 
3919 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg1_tcpmem_active(memcg))
3920 		static_branch_dec(&memcg_sockets_enabled_key);
3921 
3922 	if (!cgroup_memory_nobpf)
3923 		static_branch_dec(&memcg_bpf_enabled_key);
3924 
3925 	vmpressure_cleanup(&memcg->vmpressure);
3926 	cancel_work_sync(&memcg->high_work);
3927 	memcg1_remove_from_trees(memcg);
3928 	free_shrinker_info(memcg);
3929 	mem_cgroup_free(memcg);
3930 }
3931 
3932 /**
3933  * mem_cgroup_css_reset - reset the states of a mem_cgroup
3934  * @css: the target css
3935  *
3936  * Reset the states of the mem_cgroup associated with @css.  This is
3937  * invoked when the userland requests disabling on the default hierarchy
3938  * but the memcg is pinned through dependency.  The memcg should stop
3939  * applying policies and should revert to the vanilla state as it may be
3940  * made visible again.
3941  *
3942  * The current implementation only resets the essential configurations.
3943  * This needs to be expanded to cover all the visible parts.
3944  */
mem_cgroup_css_reset(struct cgroup_subsys_state * css)3945 static void mem_cgroup_css_reset(struct cgroup_subsys_state *css)
3946 {
3947 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3948 
3949 	page_counter_set_max(&memcg->memory, PAGE_COUNTER_MAX);
3950 	page_counter_set_max(&memcg->swap, PAGE_COUNTER_MAX);
3951 #ifdef CONFIG_MEMCG_V1
3952 	page_counter_set_max(&memcg->kmem, PAGE_COUNTER_MAX);
3953 	page_counter_set_max(&memcg->tcpmem, PAGE_COUNTER_MAX);
3954 #endif
3955 	page_counter_set_min(&memcg->memory, 0);
3956 	page_counter_set_low(&memcg->memory, 0);
3957 	page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX);
3958 	memcg1_soft_limit_reset(memcg);
3959 	page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX);
3960 	memcg_wb_domain_size_changed(memcg);
3961 }
3962 
3963 struct aggregate_control {
3964 	/* pointer to the aggregated (CPU and subtree aggregated) counters */
3965 	long *aggregate;
3966 	/* pointer to the non-hierarchichal (CPU aggregated) counters */
3967 	long *local;
3968 	/* pointer to the pending child counters during tree propagation */
3969 	long *pending;
3970 	/* pointer to the parent's pending counters, could be NULL */
3971 	long *ppending;
3972 	/* pointer to the percpu counters to be aggregated */
3973 	long *cstat;
3974 	/* pointer to the percpu counters of the last aggregation*/
3975 	long *cstat_prev;
3976 	/* size of the above counters */
3977 	int size;
3978 };
3979 
mem_cgroup_stat_aggregate(struct aggregate_control * ac)3980 static void mem_cgroup_stat_aggregate(struct aggregate_control *ac)
3981 {
3982 	int i;
3983 	long delta, delta_cpu, v;
3984 
3985 	for (i = 0; i < ac->size; i++) {
3986 		/*
3987 		 * Collect the aggregated propagation counts of groups
3988 		 * below us. We're in a per-cpu loop here and this is
3989 		 * a global counter, so the first cycle will get them.
3990 		 */
3991 		delta = ac->pending[i];
3992 		if (delta)
3993 			ac->pending[i] = 0;
3994 
3995 		/* Add CPU changes on this level since the last flush */
3996 		delta_cpu = 0;
3997 		v = READ_ONCE(ac->cstat[i]);
3998 		if (v != ac->cstat_prev[i]) {
3999 			delta_cpu = v - ac->cstat_prev[i];
4000 			delta += delta_cpu;
4001 			ac->cstat_prev[i] = v;
4002 		}
4003 
4004 		/* Aggregate counts on this level and propagate upwards */
4005 		if (delta_cpu)
4006 			ac->local[i] += delta_cpu;
4007 
4008 		if (delta) {
4009 			ac->aggregate[i] += delta;
4010 			if (ac->ppending)
4011 				ac->ppending[i] += delta;
4012 		}
4013 	}
4014 }
4015 
4016 #ifdef CONFIG_MEMCG_NMI_SAFETY_REQUIRES_ATOMIC
flush_nmi_stats(struct mem_cgroup * memcg,struct mem_cgroup * parent,int cpu)4017 static void flush_nmi_stats(struct mem_cgroup *memcg, struct mem_cgroup *parent,
4018 			    int cpu)
4019 {
4020 	int nid;
4021 
4022 	if (atomic_read(&memcg->kmem_stat)) {
4023 		int kmem = atomic_xchg(&memcg->kmem_stat, 0);
4024 		int index = memcg_stats_index(MEMCG_KMEM);
4025 
4026 		memcg->vmstats->state[index] += kmem;
4027 		if (parent)
4028 			parent->vmstats->state_pending[index] += kmem;
4029 	}
4030 
4031 	for_each_node_state(nid, N_MEMORY) {
4032 		struct mem_cgroup_per_node *pn = memcg->nodeinfo[nid];
4033 		struct lruvec_stats *lstats = pn->lruvec_stats;
4034 		struct lruvec_stats *plstats = NULL;
4035 
4036 		if (parent)
4037 			plstats = parent->nodeinfo[nid]->lruvec_stats;
4038 
4039 		if (atomic_read(&pn->slab_reclaimable)) {
4040 			int slab = atomic_xchg(&pn->slab_reclaimable, 0);
4041 			int index = memcg_stats_index(NR_SLAB_RECLAIMABLE_B);
4042 
4043 			lstats->state[index] += slab;
4044 			if (plstats)
4045 				plstats->state_pending[index] += slab;
4046 		}
4047 		if (atomic_read(&pn->slab_unreclaimable)) {
4048 			int slab = atomic_xchg(&pn->slab_unreclaimable, 0);
4049 			int index = memcg_stats_index(NR_SLAB_UNRECLAIMABLE_B);
4050 
4051 			lstats->state[index] += slab;
4052 			if (plstats)
4053 				plstats->state_pending[index] += slab;
4054 		}
4055 	}
4056 }
4057 #else
flush_nmi_stats(struct mem_cgroup * memcg,struct mem_cgroup * parent,int cpu)4058 static void flush_nmi_stats(struct mem_cgroup *memcg, struct mem_cgroup *parent,
4059 			    int cpu)
4060 {}
4061 #endif
4062 
mem_cgroup_css_rstat_flush(struct cgroup_subsys_state * css,int cpu)4063 static void mem_cgroup_css_rstat_flush(struct cgroup_subsys_state *css, int cpu)
4064 {
4065 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4066 	struct mem_cgroup *parent = parent_mem_cgroup(memcg);
4067 	struct memcg_vmstats_percpu *statc;
4068 	struct aggregate_control ac;
4069 	int nid;
4070 
4071 	flush_nmi_stats(memcg, parent, cpu);
4072 
4073 	statc = per_cpu_ptr(memcg->vmstats_percpu, cpu);
4074 
4075 	ac = (struct aggregate_control) {
4076 		.aggregate = memcg->vmstats->state,
4077 		.local = memcg->vmstats->state_local,
4078 		.pending = memcg->vmstats->state_pending,
4079 		.ppending = parent ? parent->vmstats->state_pending : NULL,
4080 		.cstat = statc->state,
4081 		.cstat_prev = statc->state_prev,
4082 		.size = MEMCG_VMSTAT_SIZE,
4083 	};
4084 	mem_cgroup_stat_aggregate(&ac);
4085 
4086 	ac = (struct aggregate_control) {
4087 		.aggregate = memcg->vmstats->events,
4088 		.local = memcg->vmstats->events_local,
4089 		.pending = memcg->vmstats->events_pending,
4090 		.ppending = parent ? parent->vmstats->events_pending : NULL,
4091 		.cstat = statc->events,
4092 		.cstat_prev = statc->events_prev,
4093 		.size = NR_MEMCG_EVENTS,
4094 	};
4095 	mem_cgroup_stat_aggregate(&ac);
4096 
4097 	for_each_node_state(nid, N_MEMORY) {
4098 		struct mem_cgroup_per_node *pn = memcg->nodeinfo[nid];
4099 		struct lruvec_stats *lstats = pn->lruvec_stats;
4100 		struct lruvec_stats *plstats = NULL;
4101 		struct lruvec_stats_percpu *lstatc;
4102 
4103 		if (parent)
4104 			plstats = parent->nodeinfo[nid]->lruvec_stats;
4105 
4106 		lstatc = per_cpu_ptr(pn->lruvec_stats_percpu, cpu);
4107 
4108 		ac = (struct aggregate_control) {
4109 			.aggregate = lstats->state,
4110 			.local = lstats->state_local,
4111 			.pending = lstats->state_pending,
4112 			.ppending = plstats ? plstats->state_pending : NULL,
4113 			.cstat = lstatc->state,
4114 			.cstat_prev = lstatc->state_prev,
4115 			.size = NR_MEMCG_NODE_STAT_ITEMS,
4116 		};
4117 		mem_cgroup_stat_aggregate(&ac);
4118 
4119 	}
4120 	WRITE_ONCE(statc->stats_updates, 0);
4121 	/* We are in a per-cpu loop here, only do the atomic write once */
4122 	if (atomic_read(&memcg->vmstats->stats_updates))
4123 		atomic_set(&memcg->vmstats->stats_updates, 0);
4124 }
4125 
mem_cgroup_fork(struct task_struct * task)4126 static void mem_cgroup_fork(struct task_struct *task)
4127 {
4128 	/*
4129 	 * Set the update flag to cause task->objcg to be initialized lazily
4130 	 * on the first allocation. It can be done without any synchronization
4131 	 * because it's always performed on the current task, so does
4132 	 * current_objcg_update().
4133 	 */
4134 	task->objcg = (struct obj_cgroup *)CURRENT_OBJCG_UPDATE_FLAG;
4135 }
4136 
mem_cgroup_exit(struct task_struct * task)4137 static void mem_cgroup_exit(struct task_struct *task)
4138 {
4139 	struct obj_cgroup *objcg = task->objcg;
4140 
4141 	objcg = (struct obj_cgroup *)
4142 		((unsigned long)objcg & ~CURRENT_OBJCG_UPDATE_FLAG);
4143 	obj_cgroup_put(objcg);
4144 
4145 	/*
4146 	 * Some kernel allocations can happen after this point,
4147 	 * but let's ignore them. It can be done without any synchronization
4148 	 * because it's always performed on the current task, so does
4149 	 * current_objcg_update().
4150 	 */
4151 	task->objcg = NULL;
4152 }
4153 
4154 #ifdef CONFIG_LRU_GEN
mem_cgroup_lru_gen_attach(struct cgroup_taskset * tset)4155 static void mem_cgroup_lru_gen_attach(struct cgroup_taskset *tset)
4156 {
4157 	struct task_struct *task;
4158 	struct cgroup_subsys_state *css;
4159 
4160 	/* find the first leader if there is any */
4161 	cgroup_taskset_for_each_leader(task, css, tset)
4162 		break;
4163 
4164 	if (!task)
4165 		return;
4166 
4167 	task_lock(task);
4168 	if (task->mm && READ_ONCE(task->mm->owner) == task)
4169 		lru_gen_migrate_mm(task->mm);
4170 	task_unlock(task);
4171 }
4172 #else
mem_cgroup_lru_gen_attach(struct cgroup_taskset * tset)4173 static void mem_cgroup_lru_gen_attach(struct cgroup_taskset *tset) {}
4174 #endif /* CONFIG_LRU_GEN */
4175 
mem_cgroup_kmem_attach(struct cgroup_taskset * tset)4176 static void mem_cgroup_kmem_attach(struct cgroup_taskset *tset)
4177 {
4178 	struct task_struct *task;
4179 	struct cgroup_subsys_state *css;
4180 
4181 	cgroup_taskset_for_each(task, css, tset) {
4182 		/* atomically set the update bit */
4183 		set_bit(CURRENT_OBJCG_UPDATE_BIT, (unsigned long *)&task->objcg);
4184 	}
4185 }
4186 
mem_cgroup_attach(struct cgroup_taskset * tset)4187 static void mem_cgroup_attach(struct cgroup_taskset *tset)
4188 {
4189 	mem_cgroup_lru_gen_attach(tset);
4190 	mem_cgroup_kmem_attach(tset);
4191 }
4192 
seq_puts_memcg_tunable(struct seq_file * m,unsigned long value)4193 static int seq_puts_memcg_tunable(struct seq_file *m, unsigned long value)
4194 {
4195 	if (value == PAGE_COUNTER_MAX)
4196 		seq_puts(m, "max\n");
4197 	else
4198 		seq_printf(m, "%llu\n", (u64)value * PAGE_SIZE);
4199 
4200 	return 0;
4201 }
4202 
memory_current_read(struct cgroup_subsys_state * css,struct cftype * cft)4203 static u64 memory_current_read(struct cgroup_subsys_state *css,
4204 			       struct cftype *cft)
4205 {
4206 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4207 
4208 	return (u64)page_counter_read(&memcg->memory) * PAGE_SIZE;
4209 }
4210 
4211 #define OFP_PEAK_UNSET (((-1UL)))
4212 
peak_show(struct seq_file * sf,void * v,struct page_counter * pc)4213 static int peak_show(struct seq_file *sf, void *v, struct page_counter *pc)
4214 {
4215 	struct cgroup_of_peak *ofp = of_peak(sf->private);
4216 	u64 fd_peak = READ_ONCE(ofp->value), peak;
4217 
4218 	/* User wants global or local peak? */
4219 	if (fd_peak == OFP_PEAK_UNSET)
4220 		peak = pc->watermark;
4221 	else
4222 		peak = max(fd_peak, READ_ONCE(pc->local_watermark));
4223 
4224 	seq_printf(sf, "%llu\n", peak * PAGE_SIZE);
4225 	return 0;
4226 }
4227 
memory_peak_show(struct seq_file * sf,void * v)4228 static int memory_peak_show(struct seq_file *sf, void *v)
4229 {
4230 	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(sf));
4231 
4232 	return peak_show(sf, v, &memcg->memory);
4233 }
4234 
peak_open(struct kernfs_open_file * of)4235 static int peak_open(struct kernfs_open_file *of)
4236 {
4237 	struct cgroup_of_peak *ofp = of_peak(of);
4238 
4239 	ofp->value = OFP_PEAK_UNSET;
4240 	return 0;
4241 }
4242 
peak_release(struct kernfs_open_file * of)4243 static void peak_release(struct kernfs_open_file *of)
4244 {
4245 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4246 	struct cgroup_of_peak *ofp = of_peak(of);
4247 
4248 	if (ofp->value == OFP_PEAK_UNSET) {
4249 		/* fast path (no writes on this fd) */
4250 		return;
4251 	}
4252 	spin_lock(&memcg->peaks_lock);
4253 	list_del(&ofp->list);
4254 	spin_unlock(&memcg->peaks_lock);
4255 }
4256 
peak_write(struct kernfs_open_file * of,char * buf,size_t nbytes,loff_t off,struct page_counter * pc,struct list_head * watchers)4257 static ssize_t peak_write(struct kernfs_open_file *of, char *buf, size_t nbytes,
4258 			  loff_t off, struct page_counter *pc,
4259 			  struct list_head *watchers)
4260 {
4261 	unsigned long usage;
4262 	struct cgroup_of_peak *peer_ctx;
4263 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4264 	struct cgroup_of_peak *ofp = of_peak(of);
4265 
4266 	spin_lock(&memcg->peaks_lock);
4267 
4268 	usage = page_counter_read(pc);
4269 	WRITE_ONCE(pc->local_watermark, usage);
4270 
4271 	list_for_each_entry(peer_ctx, watchers, list)
4272 		if (usage > peer_ctx->value)
4273 			WRITE_ONCE(peer_ctx->value, usage);
4274 
4275 	/* initial write, register watcher */
4276 	if (ofp->value == OFP_PEAK_UNSET)
4277 		list_add(&ofp->list, watchers);
4278 
4279 	WRITE_ONCE(ofp->value, usage);
4280 	spin_unlock(&memcg->peaks_lock);
4281 
4282 	return nbytes;
4283 }
4284 
memory_peak_write(struct kernfs_open_file * of,char * buf,size_t nbytes,loff_t off)4285 static ssize_t memory_peak_write(struct kernfs_open_file *of, char *buf,
4286 				 size_t nbytes, loff_t off)
4287 {
4288 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4289 
4290 	return peak_write(of, buf, nbytes, off, &memcg->memory,
4291 			  &memcg->memory_peaks);
4292 }
4293 
4294 #undef OFP_PEAK_UNSET
4295 
memory_min_show(struct seq_file * m,void * v)4296 static int memory_min_show(struct seq_file *m, void *v)
4297 {
4298 	return seq_puts_memcg_tunable(m,
4299 		READ_ONCE(mem_cgroup_from_seq(m)->memory.min));
4300 }
4301 
memory_min_write(struct kernfs_open_file * of,char * buf,size_t nbytes,loff_t off)4302 static ssize_t memory_min_write(struct kernfs_open_file *of,
4303 				char *buf, size_t nbytes, loff_t off)
4304 {
4305 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4306 	unsigned long min;
4307 	int err;
4308 
4309 	buf = strstrip(buf);
4310 	err = page_counter_memparse(buf, "max", &min);
4311 	if (err)
4312 		return err;
4313 
4314 	page_counter_set_min(&memcg->memory, min);
4315 
4316 	return nbytes;
4317 }
4318 
memory_low_show(struct seq_file * m,void * v)4319 static int memory_low_show(struct seq_file *m, void *v)
4320 {
4321 	return seq_puts_memcg_tunable(m,
4322 		READ_ONCE(mem_cgroup_from_seq(m)->memory.low));
4323 }
4324 
memory_low_write(struct kernfs_open_file * of,char * buf,size_t nbytes,loff_t off)4325 static ssize_t memory_low_write(struct kernfs_open_file *of,
4326 				char *buf, size_t nbytes, loff_t off)
4327 {
4328 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4329 	unsigned long low;
4330 	int err;
4331 
4332 	buf = strstrip(buf);
4333 	err = page_counter_memparse(buf, "max", &low);
4334 	if (err)
4335 		return err;
4336 
4337 	page_counter_set_low(&memcg->memory, low);
4338 
4339 	return nbytes;
4340 }
4341 
memory_high_show(struct seq_file * m,void * v)4342 static int memory_high_show(struct seq_file *m, void *v)
4343 {
4344 	return seq_puts_memcg_tunable(m,
4345 		READ_ONCE(mem_cgroup_from_seq(m)->memory.high));
4346 }
4347 
memory_high_write(struct kernfs_open_file * of,char * buf,size_t nbytes,loff_t off)4348 static ssize_t memory_high_write(struct kernfs_open_file *of,
4349 				 char *buf, size_t nbytes, loff_t off)
4350 {
4351 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4352 	unsigned int nr_retries = MAX_RECLAIM_RETRIES;
4353 	bool drained = false;
4354 	unsigned long high;
4355 	int err;
4356 
4357 	buf = strstrip(buf);
4358 	err = page_counter_memparse(buf, "max", &high);
4359 	if (err)
4360 		return err;
4361 
4362 	page_counter_set_high(&memcg->memory, high);
4363 
4364 	if (of->file->f_flags & O_NONBLOCK)
4365 		goto out;
4366 
4367 	for (;;) {
4368 		unsigned long nr_pages = page_counter_read(&memcg->memory);
4369 		unsigned long reclaimed;
4370 
4371 		if (nr_pages <= high)
4372 			break;
4373 
4374 		if (signal_pending(current))
4375 			break;
4376 
4377 		if (!drained) {
4378 			drain_all_stock(memcg);
4379 			drained = true;
4380 			continue;
4381 		}
4382 
4383 		reclaimed = try_to_free_mem_cgroup_pages(memcg, nr_pages - high,
4384 					GFP_KERNEL, MEMCG_RECLAIM_MAY_SWAP, NULL);
4385 
4386 		if (!reclaimed && !nr_retries--)
4387 			break;
4388 	}
4389 out:
4390 	memcg_wb_domain_size_changed(memcg);
4391 	return nbytes;
4392 }
4393 
memory_max_show(struct seq_file * m,void * v)4394 static int memory_max_show(struct seq_file *m, void *v)
4395 {
4396 	return seq_puts_memcg_tunable(m,
4397 		READ_ONCE(mem_cgroup_from_seq(m)->memory.max));
4398 }
4399 
memory_max_write(struct kernfs_open_file * of,char * buf,size_t nbytes,loff_t off)4400 static ssize_t memory_max_write(struct kernfs_open_file *of,
4401 				char *buf, size_t nbytes, loff_t off)
4402 {
4403 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4404 	unsigned int nr_reclaims = MAX_RECLAIM_RETRIES;
4405 	bool drained = false;
4406 	unsigned long max;
4407 	int err;
4408 
4409 	buf = strstrip(buf);
4410 	err = page_counter_memparse(buf, "max", &max);
4411 	if (err)
4412 		return err;
4413 
4414 	xchg(&memcg->memory.max, max);
4415 
4416 	if (of->file->f_flags & O_NONBLOCK)
4417 		goto out;
4418 
4419 	for (;;) {
4420 		unsigned long nr_pages = page_counter_read(&memcg->memory);
4421 
4422 		if (nr_pages <= max)
4423 			break;
4424 
4425 		if (signal_pending(current))
4426 			break;
4427 
4428 		if (!drained) {
4429 			drain_all_stock(memcg);
4430 			drained = true;
4431 			continue;
4432 		}
4433 
4434 		if (nr_reclaims) {
4435 			if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max,
4436 					GFP_KERNEL, MEMCG_RECLAIM_MAY_SWAP, NULL))
4437 				nr_reclaims--;
4438 			continue;
4439 		}
4440 
4441 		memcg_memory_event(memcg, MEMCG_OOM);
4442 		if (!mem_cgroup_out_of_memory(memcg, GFP_KERNEL, 0))
4443 			break;
4444 		cond_resched();
4445 	}
4446 out:
4447 	memcg_wb_domain_size_changed(memcg);
4448 	return nbytes;
4449 }
4450 
4451 /*
4452  * Note: don't forget to update the 'samples/cgroup/memcg_event_listener'
4453  * if any new events become available.
4454  */
__memory_events_show(struct seq_file * m,atomic_long_t * events)4455 static void __memory_events_show(struct seq_file *m, atomic_long_t *events)
4456 {
4457 	seq_printf(m, "low %lu\n", atomic_long_read(&events[MEMCG_LOW]));
4458 	seq_printf(m, "high %lu\n", atomic_long_read(&events[MEMCG_HIGH]));
4459 	seq_printf(m, "max %lu\n", atomic_long_read(&events[MEMCG_MAX]));
4460 	seq_printf(m, "oom %lu\n", atomic_long_read(&events[MEMCG_OOM]));
4461 	seq_printf(m, "oom_kill %lu\n",
4462 		   atomic_long_read(&events[MEMCG_OOM_KILL]));
4463 	seq_printf(m, "oom_group_kill %lu\n",
4464 		   atomic_long_read(&events[MEMCG_OOM_GROUP_KILL]));
4465 }
4466 
memory_events_show(struct seq_file * m,void * v)4467 static int memory_events_show(struct seq_file *m, void *v)
4468 {
4469 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
4470 
4471 	__memory_events_show(m, memcg->memory_events);
4472 	return 0;
4473 }
4474 
memory_events_local_show(struct seq_file * m,void * v)4475 static int memory_events_local_show(struct seq_file *m, void *v)
4476 {
4477 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
4478 
4479 	__memory_events_show(m, memcg->memory_events_local);
4480 	return 0;
4481 }
4482 
memory_stat_show(struct seq_file * m,void * v)4483 int memory_stat_show(struct seq_file *m, void *v)
4484 {
4485 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
4486 	char *buf = kmalloc(SEQ_BUF_SIZE, GFP_KERNEL);
4487 	struct seq_buf s;
4488 
4489 	if (!buf)
4490 		return -ENOMEM;
4491 	seq_buf_init(&s, buf, SEQ_BUF_SIZE);
4492 	memory_stat_format(memcg, &s);
4493 	seq_puts(m, buf);
4494 	kfree(buf);
4495 	return 0;
4496 }
4497 
4498 #ifdef CONFIG_NUMA
lruvec_page_state_output(struct lruvec * lruvec,int item)4499 static inline unsigned long lruvec_page_state_output(struct lruvec *lruvec,
4500 						     int item)
4501 {
4502 	return lruvec_page_state(lruvec, item) *
4503 		memcg_page_state_output_unit(item);
4504 }
4505 
memory_numa_stat_show(struct seq_file * m,void * v)4506 static int memory_numa_stat_show(struct seq_file *m, void *v)
4507 {
4508 	int i;
4509 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
4510 
4511 	mem_cgroup_flush_stats(memcg);
4512 
4513 	for (i = 0; i < ARRAY_SIZE(memory_stats); i++) {
4514 		int nid;
4515 
4516 		if (memory_stats[i].idx >= NR_VM_NODE_STAT_ITEMS)
4517 			continue;
4518 
4519 		seq_printf(m, "%s", memory_stats[i].name);
4520 		for_each_node_state(nid, N_MEMORY) {
4521 			u64 size;
4522 			struct lruvec *lruvec;
4523 
4524 			lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
4525 			size = lruvec_page_state_output(lruvec,
4526 							memory_stats[i].idx);
4527 			seq_printf(m, " N%d=%llu", nid, size);
4528 		}
4529 		seq_putc(m, '\n');
4530 	}
4531 
4532 	return 0;
4533 }
4534 #endif
4535 
memory_oom_group_show(struct seq_file * m,void * v)4536 static int memory_oom_group_show(struct seq_file *m, void *v)
4537 {
4538 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
4539 
4540 	seq_printf(m, "%d\n", READ_ONCE(memcg->oom_group));
4541 
4542 	return 0;
4543 }
4544 
memory_oom_group_write(struct kernfs_open_file * of,char * buf,size_t nbytes,loff_t off)4545 static ssize_t memory_oom_group_write(struct kernfs_open_file *of,
4546 				      char *buf, size_t nbytes, loff_t off)
4547 {
4548 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4549 	int ret, oom_group;
4550 
4551 	buf = strstrip(buf);
4552 	if (!buf)
4553 		return -EINVAL;
4554 
4555 	ret = kstrtoint(buf, 0, &oom_group);
4556 	if (ret)
4557 		return ret;
4558 
4559 	if (oom_group != 0 && oom_group != 1)
4560 		return -EINVAL;
4561 
4562 	WRITE_ONCE(memcg->oom_group, oom_group);
4563 
4564 	return nbytes;
4565 }
4566 
4567 enum {
4568 	MEMORY_RECLAIM_SWAPPINESS = 0,
4569 	MEMORY_RECLAIM_SWAPPINESS_MAX,
4570 	MEMORY_RECLAIM_NULL,
4571 };
4572 
4573 static const match_table_t tokens = {
4574 	{ MEMORY_RECLAIM_SWAPPINESS, "swappiness=%d"},
4575 	{ MEMORY_RECLAIM_SWAPPINESS_MAX, "swappiness=max"},
4576 	{ MEMORY_RECLAIM_NULL, NULL },
4577 };
4578 
memory_reclaim(struct kernfs_open_file * of,char * buf,size_t nbytes,loff_t off)4579 static ssize_t memory_reclaim(struct kernfs_open_file *of, char *buf,
4580 			      size_t nbytes, loff_t off)
4581 {
4582 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4583 	unsigned int nr_retries = MAX_RECLAIM_RETRIES;
4584 	unsigned long nr_to_reclaim, nr_reclaimed = 0;
4585 	int swappiness = -1;
4586 	unsigned int reclaim_options;
4587 	char *old_buf, *start;
4588 	substring_t args[MAX_OPT_ARGS];
4589 
4590 	buf = strstrip(buf);
4591 
4592 	old_buf = buf;
4593 	nr_to_reclaim = memparse(buf, &buf) / PAGE_SIZE;
4594 	if (buf == old_buf)
4595 		return -EINVAL;
4596 
4597 	buf = strstrip(buf);
4598 
4599 	while ((start = strsep(&buf, " ")) != NULL) {
4600 		if (!strlen(start))
4601 			continue;
4602 		switch (match_token(start, tokens, args)) {
4603 		case MEMORY_RECLAIM_SWAPPINESS:
4604 			if (match_int(&args[0], &swappiness))
4605 				return -EINVAL;
4606 			if (swappiness < MIN_SWAPPINESS || swappiness > MAX_SWAPPINESS)
4607 				return -EINVAL;
4608 			break;
4609 		case MEMORY_RECLAIM_SWAPPINESS_MAX:
4610 			swappiness = SWAPPINESS_ANON_ONLY;
4611 			break;
4612 		default:
4613 			return -EINVAL;
4614 		}
4615 	}
4616 
4617 	reclaim_options	= MEMCG_RECLAIM_MAY_SWAP | MEMCG_RECLAIM_PROACTIVE;
4618 	while (nr_reclaimed < nr_to_reclaim) {
4619 		/* Will converge on zero, but reclaim enforces a minimum */
4620 		unsigned long batch_size = (nr_to_reclaim - nr_reclaimed) / 4;
4621 		unsigned long reclaimed;
4622 
4623 		if (signal_pending(current))
4624 			return -EINTR;
4625 
4626 		/*
4627 		 * This is the final attempt, drain percpu lru caches in the
4628 		 * hope of introducing more evictable pages for
4629 		 * try_to_free_mem_cgroup_pages().
4630 		 */
4631 		if (!nr_retries)
4632 			lru_add_drain_all();
4633 
4634 		reclaimed = try_to_free_mem_cgroup_pages(memcg,
4635 					batch_size, GFP_KERNEL,
4636 					reclaim_options,
4637 					swappiness == -1 ? NULL : &swappiness);
4638 
4639 		if (!reclaimed && !nr_retries--)
4640 			return -EAGAIN;
4641 
4642 		nr_reclaimed += reclaimed;
4643 	}
4644 
4645 	return nbytes;
4646 }
4647 
4648 static struct cftype memory_files[] = {
4649 	{
4650 		.name = "current",
4651 		.flags = CFTYPE_NOT_ON_ROOT,
4652 		.read_u64 = memory_current_read,
4653 	},
4654 	{
4655 		.name = "peak",
4656 		.flags = CFTYPE_NOT_ON_ROOT,
4657 		.open = peak_open,
4658 		.release = peak_release,
4659 		.seq_show = memory_peak_show,
4660 		.write = memory_peak_write,
4661 	},
4662 	{
4663 		.name = "min",
4664 		.flags = CFTYPE_NOT_ON_ROOT,
4665 		.seq_show = memory_min_show,
4666 		.write = memory_min_write,
4667 	},
4668 	{
4669 		.name = "low",
4670 		.flags = CFTYPE_NOT_ON_ROOT,
4671 		.seq_show = memory_low_show,
4672 		.write = memory_low_write,
4673 	},
4674 	{
4675 		.name = "high",
4676 		.flags = CFTYPE_NOT_ON_ROOT,
4677 		.seq_show = memory_high_show,
4678 		.write = memory_high_write,
4679 	},
4680 	{
4681 		.name = "max",
4682 		.flags = CFTYPE_NOT_ON_ROOT,
4683 		.seq_show = memory_max_show,
4684 		.write = memory_max_write,
4685 	},
4686 	{
4687 		.name = "events",
4688 		.flags = CFTYPE_NOT_ON_ROOT,
4689 		.file_offset = offsetof(struct mem_cgroup, events_file),
4690 		.seq_show = memory_events_show,
4691 	},
4692 	{
4693 		.name = "events.local",
4694 		.flags = CFTYPE_NOT_ON_ROOT,
4695 		.file_offset = offsetof(struct mem_cgroup, events_local_file),
4696 		.seq_show = memory_events_local_show,
4697 	},
4698 	{
4699 		.name = "stat",
4700 		.seq_show = memory_stat_show,
4701 	},
4702 #ifdef CONFIG_NUMA
4703 	{
4704 		.name = "numa_stat",
4705 		.seq_show = memory_numa_stat_show,
4706 	},
4707 #endif
4708 	{
4709 		.name = "oom.group",
4710 		.flags = CFTYPE_NOT_ON_ROOT | CFTYPE_NS_DELEGATABLE,
4711 		.seq_show = memory_oom_group_show,
4712 		.write = memory_oom_group_write,
4713 	},
4714 	{
4715 		.name = "reclaim",
4716 		.flags = CFTYPE_NS_DELEGATABLE,
4717 		.write = memory_reclaim,
4718 	},
4719 	{ }	/* terminate */
4720 };
4721 
4722 struct cgroup_subsys memory_cgrp_subsys = {
4723 	.css_alloc = mem_cgroup_css_alloc,
4724 	.css_online = mem_cgroup_css_online,
4725 	.css_offline = mem_cgroup_css_offline,
4726 	.css_released = mem_cgroup_css_released,
4727 	.css_free = mem_cgroup_css_free,
4728 	.css_reset = mem_cgroup_css_reset,
4729 	.css_rstat_flush = mem_cgroup_css_rstat_flush,
4730 	.attach = mem_cgroup_attach,
4731 	.fork = mem_cgroup_fork,
4732 	.exit = mem_cgroup_exit,
4733 	.dfl_cftypes = memory_files,
4734 #ifdef CONFIG_MEMCG_V1
4735 	.legacy_cftypes = mem_cgroup_legacy_files,
4736 #endif
4737 	.early_init = 0,
4738 };
4739 
4740 /**
4741  * mem_cgroup_calculate_protection - check if memory consumption is in the normal range
4742  * @root: the top ancestor of the sub-tree being checked
4743  * @memcg: the memory cgroup to check
4744  *
4745  * WARNING: This function is not stateless! It can only be used as part
4746  *          of a top-down tree iteration, not for isolated queries.
4747  */
mem_cgroup_calculate_protection(struct mem_cgroup * root,struct mem_cgroup * memcg)4748 void mem_cgroup_calculate_protection(struct mem_cgroup *root,
4749 				     struct mem_cgroup *memcg)
4750 {
4751 	bool recursive_protection =
4752 		cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_RECURSIVE_PROT;
4753 
4754 	if (mem_cgroup_disabled())
4755 		return;
4756 
4757 	if (!root)
4758 		root = root_mem_cgroup;
4759 
4760 	page_counter_calculate_protection(&root->memory, &memcg->memory, recursive_protection);
4761 }
4762 
charge_memcg(struct folio * folio,struct mem_cgroup * memcg,gfp_t gfp)4763 static int charge_memcg(struct folio *folio, struct mem_cgroup *memcg,
4764 			gfp_t gfp)
4765 {
4766 	int ret;
4767 
4768 	ret = try_charge(memcg, gfp, folio_nr_pages(folio));
4769 	if (ret)
4770 		goto out;
4771 
4772 	css_get(&memcg->css);
4773 	commit_charge(folio, memcg);
4774 	memcg1_commit_charge(folio, memcg);
4775 out:
4776 	return ret;
4777 }
4778 
__mem_cgroup_charge(struct folio * folio,struct mm_struct * mm,gfp_t gfp)4779 int __mem_cgroup_charge(struct folio *folio, struct mm_struct *mm, gfp_t gfp)
4780 {
4781 	struct mem_cgroup *memcg;
4782 	int ret;
4783 
4784 	memcg = get_mem_cgroup_from_mm(mm);
4785 	ret = charge_memcg(folio, memcg, gfp);
4786 	css_put(&memcg->css);
4787 
4788 	return ret;
4789 }
4790 
4791 /**
4792  * mem_cgroup_charge_hugetlb - charge the memcg for a hugetlb folio
4793  * @folio: folio being charged
4794  * @gfp: reclaim mode
4795  *
4796  * This function is called when allocating a huge page folio, after the page has
4797  * already been obtained and charged to the appropriate hugetlb cgroup
4798  * controller (if it is enabled).
4799  *
4800  * Returns ENOMEM if the memcg is already full.
4801  * Returns 0 if either the charge was successful, or if we skip the charging.
4802  */
mem_cgroup_charge_hugetlb(struct folio * folio,gfp_t gfp)4803 int mem_cgroup_charge_hugetlb(struct folio *folio, gfp_t gfp)
4804 {
4805 	struct mem_cgroup *memcg = get_mem_cgroup_from_current();
4806 	int ret = 0;
4807 
4808 	/*
4809 	 * Even memcg does not account for hugetlb, we still want to update
4810 	 * system-level stats via lruvec_stat_mod_folio. Return 0, and skip
4811 	 * charging the memcg.
4812 	 */
4813 	if (mem_cgroup_disabled() || !memcg_accounts_hugetlb() ||
4814 		!memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
4815 		goto out;
4816 
4817 	if (charge_memcg(folio, memcg, gfp))
4818 		ret = -ENOMEM;
4819 
4820 out:
4821 	mem_cgroup_put(memcg);
4822 	return ret;
4823 }
4824 
4825 /**
4826  * mem_cgroup_swapin_charge_folio - Charge a newly allocated folio for swapin.
4827  * @folio: folio to charge.
4828  * @mm: mm context of the victim
4829  * @gfp: reclaim mode
4830  * @entry: swap entry for which the folio is allocated
4831  *
4832  * This function charges a folio allocated for swapin. Please call this before
4833  * adding the folio to the swapcache.
4834  *
4835  * Returns 0 on success. Otherwise, an error code is returned.
4836  */
mem_cgroup_swapin_charge_folio(struct folio * folio,struct mm_struct * mm,gfp_t gfp,swp_entry_t entry)4837 int mem_cgroup_swapin_charge_folio(struct folio *folio, struct mm_struct *mm,
4838 				  gfp_t gfp, swp_entry_t entry)
4839 {
4840 	struct mem_cgroup *memcg;
4841 	unsigned short id;
4842 	int ret;
4843 
4844 	if (mem_cgroup_disabled())
4845 		return 0;
4846 
4847 	id = lookup_swap_cgroup_id(entry);
4848 	rcu_read_lock();
4849 	memcg = mem_cgroup_from_id(id);
4850 	if (!memcg || !css_tryget_online(&memcg->css))
4851 		memcg = get_mem_cgroup_from_mm(mm);
4852 	rcu_read_unlock();
4853 
4854 	ret = charge_memcg(folio, memcg, gfp);
4855 
4856 	css_put(&memcg->css);
4857 	return ret;
4858 }
4859 
4860 struct uncharge_gather {
4861 	struct mem_cgroup *memcg;
4862 	unsigned long nr_memory;
4863 	unsigned long pgpgout;
4864 	unsigned long nr_kmem;
4865 	int nid;
4866 };
4867 
uncharge_gather_clear(struct uncharge_gather * ug)4868 static inline void uncharge_gather_clear(struct uncharge_gather *ug)
4869 {
4870 	memset(ug, 0, sizeof(*ug));
4871 }
4872 
uncharge_batch(const struct uncharge_gather * ug)4873 static void uncharge_batch(const struct uncharge_gather *ug)
4874 {
4875 	if (ug->nr_memory) {
4876 		memcg_uncharge(ug->memcg, ug->nr_memory);
4877 		if (ug->nr_kmem) {
4878 			mod_memcg_state(ug->memcg, MEMCG_KMEM, -ug->nr_kmem);
4879 			memcg1_account_kmem(ug->memcg, -ug->nr_kmem);
4880 		}
4881 		memcg1_oom_recover(ug->memcg);
4882 	}
4883 
4884 	memcg1_uncharge_batch(ug->memcg, ug->pgpgout, ug->nr_memory, ug->nid);
4885 
4886 	/* drop reference from uncharge_folio */
4887 	css_put(&ug->memcg->css);
4888 }
4889 
uncharge_folio(struct folio * folio,struct uncharge_gather * ug)4890 static void uncharge_folio(struct folio *folio, struct uncharge_gather *ug)
4891 {
4892 	long nr_pages;
4893 	struct mem_cgroup *memcg;
4894 	struct obj_cgroup *objcg;
4895 
4896 	VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
4897 
4898 	/*
4899 	 * Nobody should be changing or seriously looking at
4900 	 * folio memcg or objcg at this point, we have fully
4901 	 * exclusive access to the folio.
4902 	 */
4903 	if (folio_memcg_kmem(folio)) {
4904 		objcg = __folio_objcg(folio);
4905 		/*
4906 		 * This get matches the put at the end of the function and
4907 		 * kmem pages do not hold memcg references anymore.
4908 		 */
4909 		memcg = get_mem_cgroup_from_objcg(objcg);
4910 	} else {
4911 		memcg = __folio_memcg(folio);
4912 	}
4913 
4914 	if (!memcg)
4915 		return;
4916 
4917 	if (ug->memcg != memcg) {
4918 		if (ug->memcg) {
4919 			uncharge_batch(ug);
4920 			uncharge_gather_clear(ug);
4921 		}
4922 		ug->memcg = memcg;
4923 		ug->nid = folio_nid(folio);
4924 
4925 		/* pairs with css_put in uncharge_batch */
4926 		css_get(&memcg->css);
4927 	}
4928 
4929 	nr_pages = folio_nr_pages(folio);
4930 
4931 	if (folio_memcg_kmem(folio)) {
4932 		ug->nr_memory += nr_pages;
4933 		ug->nr_kmem += nr_pages;
4934 
4935 		folio->memcg_data = 0;
4936 		obj_cgroup_put(objcg);
4937 	} else {
4938 		/* LRU pages aren't accounted at the root level */
4939 		if (!mem_cgroup_is_root(memcg))
4940 			ug->nr_memory += nr_pages;
4941 		ug->pgpgout++;
4942 
4943 		WARN_ON_ONCE(folio_unqueue_deferred_split(folio));
4944 		folio->memcg_data = 0;
4945 	}
4946 
4947 	css_put(&memcg->css);
4948 }
4949 
__mem_cgroup_uncharge(struct folio * folio)4950 void __mem_cgroup_uncharge(struct folio *folio)
4951 {
4952 	struct uncharge_gather ug;
4953 
4954 	/* Don't touch folio->lru of any random page, pre-check: */
4955 	if (!folio_memcg_charged(folio))
4956 		return;
4957 
4958 	uncharge_gather_clear(&ug);
4959 	uncharge_folio(folio, &ug);
4960 	uncharge_batch(&ug);
4961 }
4962 
__mem_cgroup_uncharge_folios(struct folio_batch * folios)4963 void __mem_cgroup_uncharge_folios(struct folio_batch *folios)
4964 {
4965 	struct uncharge_gather ug;
4966 	unsigned int i;
4967 
4968 	uncharge_gather_clear(&ug);
4969 	for (i = 0; i < folios->nr; i++)
4970 		uncharge_folio(folios->folios[i], &ug);
4971 	if (ug.memcg)
4972 		uncharge_batch(&ug);
4973 }
4974 
4975 /**
4976  * mem_cgroup_replace_folio - Charge a folio's replacement.
4977  * @old: Currently circulating folio.
4978  * @new: Replacement folio.
4979  *
4980  * Charge @new as a replacement folio for @old. @old will
4981  * be uncharged upon free.
4982  *
4983  * Both folios must be locked, @new->mapping must be set up.
4984  */
mem_cgroup_replace_folio(struct folio * old,struct folio * new)4985 void mem_cgroup_replace_folio(struct folio *old, struct folio *new)
4986 {
4987 	struct mem_cgroup *memcg;
4988 	long nr_pages = folio_nr_pages(new);
4989 
4990 	VM_BUG_ON_FOLIO(!folio_test_locked(old), old);
4991 	VM_BUG_ON_FOLIO(!folio_test_locked(new), new);
4992 	VM_BUG_ON_FOLIO(folio_test_anon(old) != folio_test_anon(new), new);
4993 	VM_BUG_ON_FOLIO(folio_nr_pages(old) != nr_pages, new);
4994 
4995 	if (mem_cgroup_disabled())
4996 		return;
4997 
4998 	/* Page cache replacement: new folio already charged? */
4999 	if (folio_memcg_charged(new))
5000 		return;
5001 
5002 	memcg = folio_memcg(old);
5003 	VM_WARN_ON_ONCE_FOLIO(!memcg, old);
5004 	if (!memcg)
5005 		return;
5006 
5007 	/* Force-charge the new page. The old one will be freed soon */
5008 	if (!mem_cgroup_is_root(memcg)) {
5009 		page_counter_charge(&memcg->memory, nr_pages);
5010 		if (do_memsw_account())
5011 			page_counter_charge(&memcg->memsw, nr_pages);
5012 	}
5013 
5014 	css_get(&memcg->css);
5015 	commit_charge(new, memcg);
5016 	memcg1_commit_charge(new, memcg);
5017 }
5018 
5019 /**
5020  * mem_cgroup_migrate - Transfer the memcg data from the old to the new folio.
5021  * @old: Currently circulating folio.
5022  * @new: Replacement folio.
5023  *
5024  * Transfer the memcg data from the old folio to the new folio for migration.
5025  * The old folio's data info will be cleared. Note that the memory counters
5026  * will remain unchanged throughout the process.
5027  *
5028  * Both folios must be locked, @new->mapping must be set up.
5029  */
mem_cgroup_migrate(struct folio * old,struct folio * new)5030 void mem_cgroup_migrate(struct folio *old, struct folio *new)
5031 {
5032 	struct mem_cgroup *memcg;
5033 
5034 	VM_BUG_ON_FOLIO(!folio_test_locked(old), old);
5035 	VM_BUG_ON_FOLIO(!folio_test_locked(new), new);
5036 	VM_BUG_ON_FOLIO(folio_test_anon(old) != folio_test_anon(new), new);
5037 	VM_BUG_ON_FOLIO(folio_nr_pages(old) != folio_nr_pages(new), new);
5038 	VM_BUG_ON_FOLIO(folio_test_lru(old), old);
5039 
5040 	if (mem_cgroup_disabled())
5041 		return;
5042 
5043 	memcg = folio_memcg(old);
5044 	/*
5045 	 * Note that it is normal to see !memcg for a hugetlb folio.
5046 	 * For e.g, itt could have been allocated when memory_hugetlb_accounting
5047 	 * was not selected.
5048 	 */
5049 	VM_WARN_ON_ONCE_FOLIO(!folio_test_hugetlb(old) && !memcg, old);
5050 	if (!memcg)
5051 		return;
5052 
5053 	/* Transfer the charge and the css ref */
5054 	commit_charge(new, memcg);
5055 
5056 	/* Warning should never happen, so don't worry about refcount non-0 */
5057 	WARN_ON_ONCE(folio_unqueue_deferred_split(old));
5058 	old->memcg_data = 0;
5059 }
5060 
5061 DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
5062 EXPORT_SYMBOL(memcg_sockets_enabled_key);
5063 
mem_cgroup_sk_alloc(struct sock * sk)5064 void mem_cgroup_sk_alloc(struct sock *sk)
5065 {
5066 	struct mem_cgroup *memcg;
5067 
5068 	if (!mem_cgroup_sockets_enabled)
5069 		return;
5070 
5071 	/* Do not associate the sock with unrelated interrupted task's memcg. */
5072 	if (!in_task())
5073 		return;
5074 
5075 	rcu_read_lock();
5076 	memcg = mem_cgroup_from_task(current);
5077 	if (mem_cgroup_is_root(memcg))
5078 		goto out;
5079 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg1_tcpmem_active(memcg))
5080 		goto out;
5081 	if (css_tryget(&memcg->css))
5082 		sk->sk_memcg = memcg;
5083 out:
5084 	rcu_read_unlock();
5085 }
5086 
mem_cgroup_sk_free(struct sock * sk)5087 void mem_cgroup_sk_free(struct sock *sk)
5088 {
5089 	if (sk->sk_memcg)
5090 		css_put(&sk->sk_memcg->css);
5091 }
5092 
5093 /**
5094  * mem_cgroup_charge_skmem - charge socket memory
5095  * @memcg: memcg to charge
5096  * @nr_pages: number of pages to charge
5097  * @gfp_mask: reclaim mode
5098  *
5099  * Charges @nr_pages to @memcg. Returns %true if the charge fit within
5100  * @memcg's configured limit, %false if it doesn't.
5101  */
mem_cgroup_charge_skmem(struct mem_cgroup * memcg,unsigned int nr_pages,gfp_t gfp_mask)5102 bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages,
5103 			     gfp_t gfp_mask)
5104 {
5105 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
5106 		return memcg1_charge_skmem(memcg, nr_pages, gfp_mask);
5107 
5108 	if (try_charge_memcg(memcg, gfp_mask, nr_pages) == 0) {
5109 		mod_memcg_state(memcg, MEMCG_SOCK, nr_pages);
5110 		return true;
5111 	}
5112 
5113 	return false;
5114 }
5115 
5116 /**
5117  * mem_cgroup_uncharge_skmem - uncharge socket memory
5118  * @memcg: memcg to uncharge
5119  * @nr_pages: number of pages to uncharge
5120  */
mem_cgroup_uncharge_skmem(struct mem_cgroup * memcg,unsigned int nr_pages)5121 void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
5122 {
5123 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
5124 		memcg1_uncharge_skmem(memcg, nr_pages);
5125 		return;
5126 	}
5127 
5128 	mod_memcg_state(memcg, MEMCG_SOCK, -nr_pages);
5129 
5130 	refill_stock(memcg, nr_pages);
5131 }
5132 
cgroup_memory(char * s)5133 static int __init cgroup_memory(char *s)
5134 {
5135 	char *token;
5136 
5137 	while ((token = strsep(&s, ",")) != NULL) {
5138 		if (!*token)
5139 			continue;
5140 		if (!strcmp(token, "nosocket"))
5141 			cgroup_memory_nosocket = true;
5142 		if (!strcmp(token, "nokmem"))
5143 			cgroup_memory_nokmem = true;
5144 		if (!strcmp(token, "nobpf"))
5145 			cgroup_memory_nobpf = true;
5146 	}
5147 	return 1;
5148 }
5149 __setup("cgroup.memory=", cgroup_memory);
5150 
5151 /*
5152  * Memory controller init before cgroup_init() initialize root_mem_cgroup.
5153  *
5154  * Some parts like memcg_hotplug_cpu_dead() have to be initialized from this
5155  * context because of lock dependencies (cgroup_lock -> cpu hotplug) but
5156  * basically everything that doesn't depend on a specific mem_cgroup structure
5157  * should be initialized from here.
5158  */
mem_cgroup_init(void)5159 int __init mem_cgroup_init(void)
5160 {
5161 	unsigned int memcg_size;
5162 	int cpu;
5163 
5164 	/*
5165 	 * Currently s32 type (can refer to struct batched_lruvec_stat) is
5166 	 * used for per-memcg-per-cpu caching of per-node statistics. In order
5167 	 * to work fine, we should make sure that the overfill threshold can't
5168 	 * exceed S32_MAX / PAGE_SIZE.
5169 	 */
5170 	BUILD_BUG_ON(MEMCG_CHARGE_BATCH > S32_MAX / PAGE_SIZE);
5171 
5172 	cpuhp_setup_state_nocalls(CPUHP_MM_MEMCQ_DEAD, "mm/memctrl:dead", NULL,
5173 				  memcg_hotplug_cpu_dead);
5174 
5175 	for_each_possible_cpu(cpu) {
5176 		INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work,
5177 			  drain_local_memcg_stock);
5178 		INIT_WORK(&per_cpu_ptr(&obj_stock, cpu)->work,
5179 			  drain_local_obj_stock);
5180 	}
5181 
5182 	memcg_size = struct_size_t(struct mem_cgroup, nodeinfo, nr_node_ids);
5183 	memcg_cachep = kmem_cache_create("mem_cgroup", memcg_size, 0,
5184 					 SLAB_PANIC | SLAB_HWCACHE_ALIGN, NULL);
5185 
5186 	memcg_pn_cachep = KMEM_CACHE(mem_cgroup_per_node,
5187 				     SLAB_PANIC | SLAB_HWCACHE_ALIGN);
5188 
5189 	return 0;
5190 }
5191 
5192 #ifdef CONFIG_SWAP
5193 /**
5194  * __mem_cgroup_try_charge_swap - try charging swap space for a folio
5195  * @folio: folio being added to swap
5196  * @entry: swap entry to charge
5197  *
5198  * Try to charge @folio's memcg for the swap space at @entry.
5199  *
5200  * Returns 0 on success, -ENOMEM on failure.
5201  */
__mem_cgroup_try_charge_swap(struct folio * folio,swp_entry_t entry)5202 int __mem_cgroup_try_charge_swap(struct folio *folio, swp_entry_t entry)
5203 {
5204 	unsigned int nr_pages = folio_nr_pages(folio);
5205 	struct page_counter *counter;
5206 	struct mem_cgroup *memcg;
5207 
5208 	if (do_memsw_account())
5209 		return 0;
5210 
5211 	memcg = folio_memcg(folio);
5212 
5213 	VM_WARN_ON_ONCE_FOLIO(!memcg, folio);
5214 	if (!memcg)
5215 		return 0;
5216 
5217 	if (!entry.val) {
5218 		memcg_memory_event(memcg, MEMCG_SWAP_FAIL);
5219 		return 0;
5220 	}
5221 
5222 	memcg = mem_cgroup_id_get_online(memcg);
5223 
5224 	if (!mem_cgroup_is_root(memcg) &&
5225 	    !page_counter_try_charge(&memcg->swap, nr_pages, &counter)) {
5226 		memcg_memory_event(memcg, MEMCG_SWAP_MAX);
5227 		memcg_memory_event(memcg, MEMCG_SWAP_FAIL);
5228 		mem_cgroup_id_put(memcg);
5229 		return -ENOMEM;
5230 	}
5231 
5232 	/* Get references for the tail pages, too */
5233 	if (nr_pages > 1)
5234 		mem_cgroup_id_get_many(memcg, nr_pages - 1);
5235 	mod_memcg_state(memcg, MEMCG_SWAP, nr_pages);
5236 
5237 	swap_cgroup_record(folio, mem_cgroup_id(memcg), entry);
5238 
5239 	return 0;
5240 }
5241 
5242 /**
5243  * __mem_cgroup_uncharge_swap - uncharge swap space
5244  * @entry: swap entry to uncharge
5245  * @nr_pages: the amount of swap space to uncharge
5246  */
__mem_cgroup_uncharge_swap(swp_entry_t entry,unsigned int nr_pages)5247 void __mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages)
5248 {
5249 	struct mem_cgroup *memcg;
5250 	unsigned short id;
5251 
5252 	id = swap_cgroup_clear(entry, nr_pages);
5253 	rcu_read_lock();
5254 	memcg = mem_cgroup_from_id(id);
5255 	if (memcg) {
5256 		if (!mem_cgroup_is_root(memcg)) {
5257 			if (do_memsw_account())
5258 				page_counter_uncharge(&memcg->memsw, nr_pages);
5259 			else
5260 				page_counter_uncharge(&memcg->swap, nr_pages);
5261 		}
5262 		mod_memcg_state(memcg, MEMCG_SWAP, -nr_pages);
5263 		mem_cgroup_id_put_many(memcg, nr_pages);
5264 	}
5265 	rcu_read_unlock();
5266 }
5267 
mem_cgroup_get_nr_swap_pages(struct mem_cgroup * memcg)5268 long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
5269 {
5270 	long nr_swap_pages = get_nr_swap_pages();
5271 
5272 	if (mem_cgroup_disabled() || do_memsw_account())
5273 		return nr_swap_pages;
5274 	for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg))
5275 		nr_swap_pages = min_t(long, nr_swap_pages,
5276 				      READ_ONCE(memcg->swap.max) -
5277 				      page_counter_read(&memcg->swap));
5278 	return nr_swap_pages;
5279 }
5280 
mem_cgroup_swap_full(struct folio * folio)5281 bool mem_cgroup_swap_full(struct folio *folio)
5282 {
5283 	struct mem_cgroup *memcg;
5284 
5285 	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
5286 
5287 	if (vm_swap_full())
5288 		return true;
5289 	if (do_memsw_account())
5290 		return false;
5291 
5292 	memcg = folio_memcg(folio);
5293 	if (!memcg)
5294 		return false;
5295 
5296 	for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg)) {
5297 		unsigned long usage = page_counter_read(&memcg->swap);
5298 
5299 		if (usage * 2 >= READ_ONCE(memcg->swap.high) ||
5300 		    usage * 2 >= READ_ONCE(memcg->swap.max))
5301 			return true;
5302 	}
5303 
5304 	return false;
5305 }
5306 
setup_swap_account(char * s)5307 static int __init setup_swap_account(char *s)
5308 {
5309 	bool res;
5310 
5311 	if (!kstrtobool(s, &res) && !res)
5312 		pr_warn_once("The swapaccount=0 commandline option is deprecated "
5313 			     "in favor of configuring swap control via cgroupfs. "
5314 			     "Please report your usecase to linux-mm@kvack.org if you "
5315 			     "depend on this functionality.\n");
5316 	return 1;
5317 }
5318 __setup("swapaccount=", setup_swap_account);
5319 
swap_current_read(struct cgroup_subsys_state * css,struct cftype * cft)5320 static u64 swap_current_read(struct cgroup_subsys_state *css,
5321 			     struct cftype *cft)
5322 {
5323 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5324 
5325 	return (u64)page_counter_read(&memcg->swap) * PAGE_SIZE;
5326 }
5327 
swap_peak_show(struct seq_file * sf,void * v)5328 static int swap_peak_show(struct seq_file *sf, void *v)
5329 {
5330 	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(sf));
5331 
5332 	return peak_show(sf, v, &memcg->swap);
5333 }
5334 
swap_peak_write(struct kernfs_open_file * of,char * buf,size_t nbytes,loff_t off)5335 static ssize_t swap_peak_write(struct kernfs_open_file *of, char *buf,
5336 			       size_t nbytes, loff_t off)
5337 {
5338 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
5339 
5340 	return peak_write(of, buf, nbytes, off, &memcg->swap,
5341 			  &memcg->swap_peaks);
5342 }
5343 
swap_high_show(struct seq_file * m,void * v)5344 static int swap_high_show(struct seq_file *m, void *v)
5345 {
5346 	return seq_puts_memcg_tunable(m,
5347 		READ_ONCE(mem_cgroup_from_seq(m)->swap.high));
5348 }
5349 
swap_high_write(struct kernfs_open_file * of,char * buf,size_t nbytes,loff_t off)5350 static ssize_t swap_high_write(struct kernfs_open_file *of,
5351 			       char *buf, size_t nbytes, loff_t off)
5352 {
5353 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
5354 	unsigned long high;
5355 	int err;
5356 
5357 	buf = strstrip(buf);
5358 	err = page_counter_memparse(buf, "max", &high);
5359 	if (err)
5360 		return err;
5361 
5362 	page_counter_set_high(&memcg->swap, high);
5363 
5364 	return nbytes;
5365 }
5366 
swap_max_show(struct seq_file * m,void * v)5367 static int swap_max_show(struct seq_file *m, void *v)
5368 {
5369 	return seq_puts_memcg_tunable(m,
5370 		READ_ONCE(mem_cgroup_from_seq(m)->swap.max));
5371 }
5372 
swap_max_write(struct kernfs_open_file * of,char * buf,size_t nbytes,loff_t off)5373 static ssize_t swap_max_write(struct kernfs_open_file *of,
5374 			      char *buf, size_t nbytes, loff_t off)
5375 {
5376 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
5377 	unsigned long max;
5378 	int err;
5379 
5380 	buf = strstrip(buf);
5381 	err = page_counter_memparse(buf, "max", &max);
5382 	if (err)
5383 		return err;
5384 
5385 	xchg(&memcg->swap.max, max);
5386 
5387 	return nbytes;
5388 }
5389 
swap_events_show(struct seq_file * m,void * v)5390 static int swap_events_show(struct seq_file *m, void *v)
5391 {
5392 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
5393 
5394 	seq_printf(m, "high %lu\n",
5395 		   atomic_long_read(&memcg->memory_events[MEMCG_SWAP_HIGH]));
5396 	seq_printf(m, "max %lu\n",
5397 		   atomic_long_read(&memcg->memory_events[MEMCG_SWAP_MAX]));
5398 	seq_printf(m, "fail %lu\n",
5399 		   atomic_long_read(&memcg->memory_events[MEMCG_SWAP_FAIL]));
5400 
5401 	return 0;
5402 }
5403 
5404 static struct cftype swap_files[] = {
5405 	{
5406 		.name = "swap.current",
5407 		.flags = CFTYPE_NOT_ON_ROOT,
5408 		.read_u64 = swap_current_read,
5409 	},
5410 	{
5411 		.name = "swap.high",
5412 		.flags = CFTYPE_NOT_ON_ROOT,
5413 		.seq_show = swap_high_show,
5414 		.write = swap_high_write,
5415 	},
5416 	{
5417 		.name = "swap.max",
5418 		.flags = CFTYPE_NOT_ON_ROOT,
5419 		.seq_show = swap_max_show,
5420 		.write = swap_max_write,
5421 	},
5422 	{
5423 		.name = "swap.peak",
5424 		.flags = CFTYPE_NOT_ON_ROOT,
5425 		.open = peak_open,
5426 		.release = peak_release,
5427 		.seq_show = swap_peak_show,
5428 		.write = swap_peak_write,
5429 	},
5430 	{
5431 		.name = "swap.events",
5432 		.flags = CFTYPE_NOT_ON_ROOT,
5433 		.file_offset = offsetof(struct mem_cgroup, swap_events_file),
5434 		.seq_show = swap_events_show,
5435 	},
5436 	{ }	/* terminate */
5437 };
5438 
5439 #ifdef CONFIG_ZSWAP
5440 /**
5441  * obj_cgroup_may_zswap - check if this cgroup can zswap
5442  * @objcg: the object cgroup
5443  *
5444  * Check if the hierarchical zswap limit has been reached.
5445  *
5446  * This doesn't check for specific headroom, and it is not atomic
5447  * either. But with zswap, the size of the allocation is only known
5448  * once compression has occurred, and this optimistic pre-check avoids
5449  * spending cycles on compression when there is already no room left
5450  * or zswap is disabled altogether somewhere in the hierarchy.
5451  */
obj_cgroup_may_zswap(struct obj_cgroup * objcg)5452 bool obj_cgroup_may_zswap(struct obj_cgroup *objcg)
5453 {
5454 	struct mem_cgroup *memcg, *original_memcg;
5455 	bool ret = true;
5456 
5457 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
5458 		return true;
5459 
5460 	original_memcg = get_mem_cgroup_from_objcg(objcg);
5461 	for (memcg = original_memcg; !mem_cgroup_is_root(memcg);
5462 	     memcg = parent_mem_cgroup(memcg)) {
5463 		unsigned long max = READ_ONCE(memcg->zswap_max);
5464 		unsigned long pages;
5465 
5466 		if (max == PAGE_COUNTER_MAX)
5467 			continue;
5468 		if (max == 0) {
5469 			ret = false;
5470 			break;
5471 		}
5472 
5473 		/* Force flush to get accurate stats for charging */
5474 		__mem_cgroup_flush_stats(memcg, true);
5475 		pages = memcg_page_state(memcg, MEMCG_ZSWAP_B) / PAGE_SIZE;
5476 		if (pages < max)
5477 			continue;
5478 		ret = false;
5479 		break;
5480 	}
5481 	mem_cgroup_put(original_memcg);
5482 	return ret;
5483 }
5484 
5485 /**
5486  * obj_cgroup_charge_zswap - charge compression backend memory
5487  * @objcg: the object cgroup
5488  * @size: size of compressed object
5489  *
5490  * This forces the charge after obj_cgroup_may_zswap() allowed
5491  * compression and storage in zwap for this cgroup to go ahead.
5492  */
obj_cgroup_charge_zswap(struct obj_cgroup * objcg,size_t size)5493 void obj_cgroup_charge_zswap(struct obj_cgroup *objcg, size_t size)
5494 {
5495 	struct mem_cgroup *memcg;
5496 
5497 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
5498 		return;
5499 
5500 	VM_WARN_ON_ONCE(!(current->flags & PF_MEMALLOC));
5501 
5502 	/* PF_MEMALLOC context, charging must succeed */
5503 	if (obj_cgroup_charge(objcg, GFP_KERNEL, size))
5504 		VM_WARN_ON_ONCE(1);
5505 
5506 	rcu_read_lock();
5507 	memcg = obj_cgroup_memcg(objcg);
5508 	mod_memcg_state(memcg, MEMCG_ZSWAP_B, size);
5509 	mod_memcg_state(memcg, MEMCG_ZSWAPPED, 1);
5510 	rcu_read_unlock();
5511 }
5512 
5513 /**
5514  * obj_cgroup_uncharge_zswap - uncharge compression backend memory
5515  * @objcg: the object cgroup
5516  * @size: size of compressed object
5517  *
5518  * Uncharges zswap memory on page in.
5519  */
obj_cgroup_uncharge_zswap(struct obj_cgroup * objcg,size_t size)5520 void obj_cgroup_uncharge_zswap(struct obj_cgroup *objcg, size_t size)
5521 {
5522 	struct mem_cgroup *memcg;
5523 
5524 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
5525 		return;
5526 
5527 	obj_cgroup_uncharge(objcg, size);
5528 
5529 	rcu_read_lock();
5530 	memcg = obj_cgroup_memcg(objcg);
5531 	mod_memcg_state(memcg, MEMCG_ZSWAP_B, -size);
5532 	mod_memcg_state(memcg, MEMCG_ZSWAPPED, -1);
5533 	rcu_read_unlock();
5534 }
5535 
mem_cgroup_zswap_writeback_enabled(struct mem_cgroup * memcg)5536 bool mem_cgroup_zswap_writeback_enabled(struct mem_cgroup *memcg)
5537 {
5538 	/* if zswap is disabled, do not block pages going to the swapping device */
5539 	if (!zswap_is_enabled())
5540 		return true;
5541 
5542 	for (; memcg; memcg = parent_mem_cgroup(memcg))
5543 		if (!READ_ONCE(memcg->zswap_writeback))
5544 			return false;
5545 
5546 	return true;
5547 }
5548 
zswap_current_read(struct cgroup_subsys_state * css,struct cftype * cft)5549 static u64 zswap_current_read(struct cgroup_subsys_state *css,
5550 			      struct cftype *cft)
5551 {
5552 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5553 
5554 	mem_cgroup_flush_stats(memcg);
5555 	return memcg_page_state(memcg, MEMCG_ZSWAP_B);
5556 }
5557 
zswap_max_show(struct seq_file * m,void * v)5558 static int zswap_max_show(struct seq_file *m, void *v)
5559 {
5560 	return seq_puts_memcg_tunable(m,
5561 		READ_ONCE(mem_cgroup_from_seq(m)->zswap_max));
5562 }
5563 
zswap_max_write(struct kernfs_open_file * of,char * buf,size_t nbytes,loff_t off)5564 static ssize_t zswap_max_write(struct kernfs_open_file *of,
5565 			       char *buf, size_t nbytes, loff_t off)
5566 {
5567 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
5568 	unsigned long max;
5569 	int err;
5570 
5571 	buf = strstrip(buf);
5572 	err = page_counter_memparse(buf, "max", &max);
5573 	if (err)
5574 		return err;
5575 
5576 	xchg(&memcg->zswap_max, max);
5577 
5578 	return nbytes;
5579 }
5580 
zswap_writeback_show(struct seq_file * m,void * v)5581 static int zswap_writeback_show(struct seq_file *m, void *v)
5582 {
5583 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
5584 
5585 	seq_printf(m, "%d\n", READ_ONCE(memcg->zswap_writeback));
5586 	return 0;
5587 }
5588 
zswap_writeback_write(struct kernfs_open_file * of,char * buf,size_t nbytes,loff_t off)5589 static ssize_t zswap_writeback_write(struct kernfs_open_file *of,
5590 				char *buf, size_t nbytes, loff_t off)
5591 {
5592 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
5593 	int zswap_writeback;
5594 	ssize_t parse_ret = kstrtoint(strstrip(buf), 0, &zswap_writeback);
5595 
5596 	if (parse_ret)
5597 		return parse_ret;
5598 
5599 	if (zswap_writeback != 0 && zswap_writeback != 1)
5600 		return -EINVAL;
5601 
5602 	WRITE_ONCE(memcg->zswap_writeback, zswap_writeback);
5603 	return nbytes;
5604 }
5605 
5606 static struct cftype zswap_files[] = {
5607 	{
5608 		.name = "zswap.current",
5609 		.flags = CFTYPE_NOT_ON_ROOT,
5610 		.read_u64 = zswap_current_read,
5611 	},
5612 	{
5613 		.name = "zswap.max",
5614 		.flags = CFTYPE_NOT_ON_ROOT,
5615 		.seq_show = zswap_max_show,
5616 		.write = zswap_max_write,
5617 	},
5618 	{
5619 		.name = "zswap.writeback",
5620 		.seq_show = zswap_writeback_show,
5621 		.write = zswap_writeback_write,
5622 	},
5623 	{ }	/* terminate */
5624 };
5625 #endif /* CONFIG_ZSWAP */
5626 
mem_cgroup_swap_init(void)5627 static int __init mem_cgroup_swap_init(void)
5628 {
5629 	if (mem_cgroup_disabled())
5630 		return 0;
5631 
5632 	WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys, swap_files));
5633 #ifdef CONFIG_MEMCG_V1
5634 	WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys, memsw_files));
5635 #endif
5636 #ifdef CONFIG_ZSWAP
5637 	WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys, zswap_files));
5638 #endif
5639 	return 0;
5640 }
5641 subsys_initcall(mem_cgroup_swap_init);
5642 
5643 #endif /* CONFIG_SWAP */
5644 
mem_cgroup_node_allowed(struct mem_cgroup * memcg,int nid)5645 bool mem_cgroup_node_allowed(struct mem_cgroup *memcg, int nid)
5646 {
5647 	return memcg ? cpuset_node_allowed(memcg->css.cgroup, nid) : true;
5648 }
5649