// SPDX-License-Identifier: GPL-2.0-or-later
/* memcontrol.c - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * Memory thresholds
 * Copyright (C) 2009 Nokia Corporation
 * Author: Kirill A. Shutemov
 *
 * Kernel Memory Controller
 * Copyright (C) 2012 Parallels Inc. and Google Inc.
 * Authors: Glauber Costa and Suleiman Souhlal
 *
 * Native page reclaim
 * Charge lifetime sanitation
 * Lockless page tracking & accounting
 * Unified hierarchy configuration model
 * Copyright (C) 2015 Red Hat, Inc., Johannes Weiner
 *
 * Per memcg lru locking
 * Copyright (C) 2020 Alibaba, Inc, Alex Shi
 */

#include <linux/cgroup-defs.h>
#include <linux/page_counter.h>
#include <linux/memcontrol.h>
#include <linux/cgroup.h>
#include <linux/cpuset.h>
#include <linux/sched/mm.h>
#include <linux/shmem_fs.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/folio_batch.h>
#include <linux/vm_event_item.h>
#include <linux/smp.h>
#include <linux/page-flags.h>
#include <linux/backing-dev.h>
#include <linux/bit_spinlock.h>
#include <linux/rcupdate.h>
#include <linux/limits.h>
#include <linux/export.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/swapops.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/vmpressure.h>
#include <linux/memremap.h>
#include <linux/mm_inline.h>
#include <linux/swap_cgroup.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/lockdep.h>
#include <linux/resume_user_mode.h>
#include <linux/psi.h>
#include <linux/seq_buf.h>
#include <linux/sched/isolation.h>
#include <linux/kmemleak.h>
#include "internal.h"
#include <net/sock.h>
#include <net/ip.h>
#include "slab.h"
#include "memcontrol-v1.h"

#include <linux/uaccess.h>

#define CREATE_TRACE_POINTS
#include <trace/events/memcg.h>
#undef CREATE_TRACE_POINTS

#include <trace/events/vmscan.h>

struct cgroup_subsys memory_cgrp_subsys __read_mostly;
EXPORT_SYMBOL(memory_cgrp_subsys);

struct mem_cgroup *root_mem_cgroup __read_mostly;
EXPORT_SYMBOL(root_mem_cgroup);

/* Active memory cgroup to use from an interrupt context */
DEFINE_PER_CPU(struct mem_cgroup *, int_active_memcg);
EXPORT_PER_CPU_SYMBOL_GPL(int_active_memcg);

/* Socket memory accounting disabled? */
static bool cgroup_memory_nosocket __ro_after_init;

/* Kernel memory accounting disabled? */
static bool cgroup_memory_nokmem __ro_after_init;

/* BPF memory accounting disabled? */
static bool cgroup_memory_nobpf __ro_after_init;

static struct workqueue_struct *memcg_wq __ro_after_init;

static struct kmem_cache *memcg_cachep;
static struct kmem_cache *memcg_pn_cachep;

#ifdef CONFIG_CGROUP_WRITEBACK
static DECLARE_WAIT_QUEUE_HEAD(memcg_cgwb_frn_waitq);
#endif

static inline bool task_is_dying(void)
{
	return tsk_is_oom_victim(current) || fatal_signal_pending(current) ||
		(current->flags & PF_EXITING);
}

/* Some nice accessors for the vmpressure. */
struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
{
	if (!memcg)
		memcg = root_mem_cgroup;
	return &memcg->vmpressure;
}

struct mem_cgroup *vmpressure_to_memcg(struct vmpressure *vmpr)
{
	return container_of(vmpr, struct mem_cgroup, vmpressure);
}

#define SEQ_BUF_SIZE SZ_4K
#define CURRENT_OBJCG_UPDATE_BIT 0
#define CURRENT_OBJCG_UPDATE_FLAG (1UL << CURRENT_OBJCG_UPDATE_BIT)

static DEFINE_SPINLOCK(objcg_lock);

bool mem_cgroup_kmem_disabled(void)
{
	return cgroup_memory_nokmem;
}

static void memcg_uncharge(struct mem_cgroup *memcg, unsigned int nr_pages);

static void obj_cgroup_release(struct percpu_ref *ref)
{
	struct obj_cgroup *objcg = container_of(ref, struct obj_cgroup, refcnt);
	unsigned int nr_bytes;
	unsigned int nr_pages;
	unsigned long flags;

	/*
	 * At this point all allocated objects are freed, and
	 * objcg->nr_charged_bytes can't have an arbitrary byte value.
	 * However, it can be PAGE_SIZE or (x * PAGE_SIZE).
	 *
	 * The following sequence can lead to it:
	 * 1) CPU0: objcg == stock->cached_objcg
	 * 2) CPU1: we do a small allocation (e.g. 92 bytes),
	 *          PAGE_SIZE bytes are charged
	 * 3) CPU1: a process from another memcg is allocating something,
	 *          the stock is flushed,
	 *          objcg->nr_charged_bytes = PAGE_SIZE - 92
	 * 4) CPU0: we release this object,
	 *          92 bytes are added to stock->nr_bytes
	 * 5) CPU0: stock is flushed,
	 *          92 bytes are added to objcg->nr_charged_bytes
	 *
	 * As a result, nr_charged_bytes == PAGE_SIZE.
	 * This page will be uncharged in obj_cgroup_release().
	 */
	nr_bytes = atomic_read(&objcg->nr_charged_bytes);
	WARN_ON_ONCE(nr_bytes & (PAGE_SIZE - 1));
	nr_pages = nr_bytes >> PAGE_SHIFT;

	if (nr_pages) {
		struct mem_cgroup *memcg;

		memcg = get_mem_cgroup_from_objcg(objcg);
		mod_memcg_state(memcg, MEMCG_KMEM, -nr_pages);
		memcg1_account_kmem(memcg, -nr_pages);
		if (!mem_cgroup_is_root(memcg))
			memcg_uncharge(memcg, nr_pages);
		mem_cgroup_put(memcg);
	}

	spin_lock_irqsave(&objcg_lock, flags);
	list_del(&objcg->list);
	spin_unlock_irqrestore(&objcg_lock, flags);

	percpu_ref_exit(ref);
	kfree_rcu(objcg, rcu);
}
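
/*
 * Worked example for the sequence described above (illustrative numbers
 * only, assuming PAGE_SIZE == 4096):
 *
 *	charge of 92 bytes	-> one page (4096 bytes) charged,
 *				   4096 - 92 = 4004 bytes left in the stock
 *	CPU1 stock flushed	-> objcg->nr_charged_bytes = 4004
 *	object freed on CPU0	-> 92 bytes go into stock->nr_bytes
 *	CPU0 stock flushed	-> objcg->nr_charged_bytes = 4004 + 92 = 4096
 *
 * obj_cgroup_release() then sees nr_bytes == 4096, i.e. nr_pages == 1,
 * and uncharges that final page.
 */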

static struct obj_cgroup *obj_cgroup_alloc(void)
{
	struct obj_cgroup *objcg;
	int ret;

	objcg = kzalloc_obj(struct obj_cgroup);
	if (!objcg)
		return NULL;

	ret = percpu_ref_init(&objcg->refcnt, obj_cgroup_release, 0,
			      GFP_KERNEL);
	if (ret) {
		kfree(objcg);
		return NULL;
	}
	INIT_LIST_HEAD(&objcg->list);
	return objcg;
}

static inline struct obj_cgroup *__memcg_reparent_objcgs(struct mem_cgroup *memcg,
							 struct mem_cgroup *parent,
							 int nid)
{
	struct obj_cgroup *objcg, *iter;
	struct mem_cgroup_per_node *pn = memcg->nodeinfo[nid];
	struct mem_cgroup_per_node *parent_pn = parent->nodeinfo[nid];

	objcg = rcu_replace_pointer(pn->objcg, NULL, true);
	/* 1) Ready to reparent active objcg. */
	list_add(&objcg->list, &pn->objcg_list);
	/* 2) Reparent active objcg and already reparented objcgs to parent. */
	list_for_each_entry(iter, &pn->objcg_list, list)
		WRITE_ONCE(iter->memcg, parent);
	/* 3) Move already reparented objcgs to the parent's list */
	list_splice(&pn->objcg_list, &parent_pn->objcg_list);

	return objcg;
}

#ifdef CONFIG_MEMCG_V1
static void __mem_cgroup_flush_stats(struct mem_cgroup *memcg, bool force);

static inline void reparent_state_local(struct mem_cgroup *memcg, struct mem_cgroup *parent)
{
	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
		return;

	/*
	 * Reparent stats exposed non-hierarchically. Flush @memcg's stats
	 * first to read them accurately, and conservatively flush @parent's
	 * stats after reparenting to avoid hiding a potentially large stat
	 * update (e.g. from callers of mem_cgroup_flush_stats_ratelimited()).
	 */
	__mem_cgroup_flush_stats(memcg, true);

	/* The following counts are all non-hierarchical and need to be reparented. */
	reparent_memcg1_state_local(memcg, parent);
	reparent_memcg1_lruvec_state_local(memcg, parent);

	__mem_cgroup_flush_stats(parent, true);
}
#else
static inline void reparent_state_local(struct mem_cgroup *memcg, struct mem_cgroup *parent)
{
}
#endif

static inline void reparent_locks(struct mem_cgroup *memcg, struct mem_cgroup *parent, int nid)
{
	spin_lock_irq(&objcg_lock);
	spin_lock_nested(&mem_cgroup_lruvec(memcg, NODE_DATA(nid))->lru_lock, 1);
	spin_lock_nested(&mem_cgroup_lruvec(parent, NODE_DATA(nid))->lru_lock, 2);
}

static inline void reparent_unlocks(struct mem_cgroup *memcg, struct mem_cgroup *parent, int nid)
{
	spin_unlock(&mem_cgroup_lruvec(parent, NODE_DATA(nid))->lru_lock);
	spin_unlock(&mem_cgroup_lruvec(memcg, NODE_DATA(nid))->lru_lock);
	spin_unlock_irq(&objcg_lock);
}

static void memcg_reparent_objcgs(struct mem_cgroup *memcg)
{
	struct obj_cgroup *objcg;
	struct mem_cgroup *parent = parent_mem_cgroup(memcg);
	int nid;

	for_each_node(nid) {
retry:
		if (lru_gen_enabled())
			max_lru_gen_memcg(parent, nid);

		reparent_locks(memcg, parent, nid);

		if (lru_gen_enabled()) {
			if (!recheck_lru_gen_max_memcg(parent, nid)) {
				reparent_unlocks(memcg, parent, nid);
				cond_resched();
				goto retry;
			}
			lru_gen_reparent_memcg(memcg, parent, nid);
		} else {
			lru_reparent_memcg(memcg, parent, nid);
		}

		objcg = __memcg_reparent_objcgs(memcg, parent, nid);

		reparent_unlocks(memcg, parent, nid);

		percpu_ref_kill(&objcg->refcnt);
	}

	reparent_state_local(memcg, parent);
}

/*
 * A lot of the calls to the cache allocation functions are expected to be
 * inlined by the compiler. Since the calls to memcg_slab_post_alloc_hook()
 * are conditional to this static branch, modules that do kmem_cache_alloc()
 * and the like need to see this symbol as well.
 */
DEFINE_STATIC_KEY_FALSE(memcg_kmem_online_key);
EXPORT_SYMBOL(memcg_kmem_online_key);

DEFINE_STATIC_KEY_FALSE(memcg_bpf_enabled_key);
EXPORT_SYMBOL(memcg_bpf_enabled_key);

/**
 * get_mem_cgroup_css_from_folio - acquire a css of the memcg associated with a folio
 * @folio: folio of interest
 *
 * If memcg is bound to the default hierarchy, css of the memcg associated
 * with @folio is returned. The returned css remains associated with @folio
 * until it is released.
 *
 * If memcg is bound to a traditional hierarchy, the css of root_mem_cgroup
 * is returned.
 */
struct cgroup_subsys_state *get_mem_cgroup_css_from_folio(struct folio *folio)
{
	struct mem_cgroup *memcg;

	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
		return &root_mem_cgroup->css;

	memcg = get_mem_cgroup_from_folio(folio);

	return memcg ? &memcg->css : &root_mem_cgroup->css;
}

/**
 * page_cgroup_ino - return inode number of the memcg a page is charged to
 * @page: the page
 *
 * Look up the closest online ancestor of the memory cgroup @page is charged to
 * and return its inode number or 0 if @page is not charged to any cgroup. It
 * is safe to call this function without holding a reference to @page.
 *
 * Note, this function is inherently racy, because there is nothing to prevent
 * the cgroup inode from getting torn down and potentially reallocated a moment
 * after page_cgroup_ino() returns, so it only should be used by callers that
 * do not care (such as procfs interfaces).
 */
ino_t page_cgroup_ino(struct page *page)
{
	struct mem_cgroup *memcg;
	unsigned long ino = 0;

	rcu_read_lock();
	/* page_folio() is racy here, but the entire function is racy anyway */
	memcg = folio_memcg_check(page_folio(page));

	while (memcg && !css_is_online(&memcg->css))
		memcg = parent_mem_cgroup(memcg);
	if (memcg)
		ino = cgroup_ino(memcg->css.cgroup);
	rcu_read_unlock();
	return ino;
}
EXPORT_SYMBOL_GPL(page_cgroup_ino);

/* Subset of node_stat_item for memcg stats */
static const unsigned int memcg_node_stat_items[] = {
	NR_INACTIVE_ANON,
	NR_ACTIVE_ANON,
	NR_INACTIVE_FILE,
	NR_ACTIVE_FILE,
	NR_UNEVICTABLE,
	NR_SLAB_RECLAIMABLE_B,
	NR_SLAB_UNRECLAIMABLE_B,
	WORKINGSET_REFAULT_ANON,
	WORKINGSET_REFAULT_FILE,
	WORKINGSET_ACTIVATE_ANON,
	WORKINGSET_ACTIVATE_FILE,
	WORKINGSET_RESTORE_ANON,
	WORKINGSET_RESTORE_FILE,
	WORKINGSET_NODERECLAIM,
	NR_ANON_MAPPED,
	NR_FILE_MAPPED,
	NR_FILE_PAGES,
	NR_FILE_DIRTY,
	NR_WRITEBACK,
	NR_SHMEM,
	NR_SHMEM_THPS,
	NR_FILE_THPS,
	NR_ANON_THPS,
	NR_VMALLOC,
	NR_KERNEL_STACK_KB,
	NR_PAGETABLE,
	NR_SECONDARY_PAGETABLE,
#ifdef CONFIG_SWAP
	NR_SWAPCACHE,
#endif
#ifdef CONFIG_NUMA_BALANCING
	PGPROMOTE_SUCCESS,
#endif
	PGDEMOTE_KSWAPD,
	PGDEMOTE_DIRECT,
	PGDEMOTE_KHUGEPAGED,
	PGDEMOTE_PROACTIVE,
	PGSTEAL_KSWAPD,
	PGSTEAL_DIRECT,
	PGSTEAL_KHUGEPAGED,
	PGSTEAL_PROACTIVE,
	PGSTEAL_ANON,
	PGSTEAL_FILE,
	PGSCAN_KSWAPD,
	PGSCAN_DIRECT,
	PGSCAN_KHUGEPAGED,
	PGSCAN_PROACTIVE,
	PGSCAN_ANON,
	PGSCAN_FILE,
	PGREFILL,
#ifdef CONFIG_HUGETLB_PAGE
	NR_HUGETLB,
#endif
};

static const unsigned int memcg_stat_items[] = {
	MEMCG_SWAP,
	MEMCG_SOCK,
	MEMCG_PERCPU_B,
	MEMCG_KMEM,
	MEMCG_ZSWAP_B,
	MEMCG_ZSWAPPED,
	MEMCG_ZSWAP_INCOMP,
};

#define NR_MEMCG_NODE_STAT_ITEMS ARRAY_SIZE(memcg_node_stat_items)
#define MEMCG_VMSTAT_SIZE (NR_MEMCG_NODE_STAT_ITEMS + \
			   ARRAY_SIZE(memcg_stat_items))
#define BAD_STAT_IDX(index) ((u32)(index) >= U8_MAX)
static u8 mem_cgroup_stats_index[MEMCG_NR_STAT] __read_mostly;

static void init_memcg_stats(void)
{
	u8 i, j = 0;

	BUILD_BUG_ON(MEMCG_NR_STAT >= U8_MAX);

	memset(mem_cgroup_stats_index, U8_MAX, sizeof(mem_cgroup_stats_index));

	for (i = 0; i < NR_MEMCG_NODE_STAT_ITEMS; ++i, ++j)
		mem_cgroup_stats_index[memcg_node_stat_items[i]] = j;

	for (i = 0; i < ARRAY_SIZE(memcg_stat_items); ++i, ++j)
		mem_cgroup_stats_index[memcg_stat_items[i]] = j;
}
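
/*
 * Example: mem_cgroup_stats_index[] maps sparse item ids to a compact
 * array index. With the (config-independent) leading entries above,
 * NR_ANON_MAPPED is the 15th entry of memcg_node_stat_items, so
 * memcg_stats_index(NR_ANON_MAPPED) == 14 and its counter lives in
 * state[14]. Items not listed keep the U8_MAX poison value and are
 * rejected by the BAD_STAT_IDX() checks below.
 */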

static inline int memcg_stats_index(int idx)
{
	return mem_cgroup_stats_index[idx];
}

struct lruvec_stats_percpu {
	/* Local (CPU and cgroup) state */
	long state[NR_MEMCG_NODE_STAT_ITEMS];

	/* Delta calculation for lockless upward propagation */
	long state_prev[NR_MEMCG_NODE_STAT_ITEMS];
};

struct lruvec_stats {
	/* Aggregated (CPU and subtree) state */
	long state[NR_MEMCG_NODE_STAT_ITEMS];

	/* Non-hierarchical (CPU aggregated) state */
	long state_local[NR_MEMCG_NODE_STAT_ITEMS];

	/* Pending child counts during tree propagation */
	long state_pending[NR_MEMCG_NODE_STAT_ITEMS];
};

unsigned long lruvec_page_state(struct lruvec *lruvec, enum node_stat_item idx)
{
	struct mem_cgroup_per_node *pn;
	long x;
	int i;

	if (mem_cgroup_disabled())
		return node_page_state(lruvec_pgdat(lruvec), idx);

	i = memcg_stats_index(idx);
	if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx))
		return 0;

	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	x = READ_ONCE(pn->lruvec_stats->state[i]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

unsigned long lruvec_page_state_local(struct lruvec *lruvec,
				      enum node_stat_item idx)
{
	struct mem_cgroup_per_node *pn;
	long x;
	int i;

	if (mem_cgroup_disabled())
		return node_page_state(lruvec_pgdat(lruvec), idx);

	i = memcg_stats_index(idx);
	if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx))
		return 0;

	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	x = READ_ONCE(pn->lruvec_stats->state_local[i]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

#ifdef CONFIG_MEMCG_V1
static void __mod_memcg_lruvec_state(struct mem_cgroup_per_node *pn,
				     enum node_stat_item idx, long val);

void reparent_memcg_lruvec_state_local(struct mem_cgroup *memcg,
				       struct mem_cgroup *parent, int idx)
{
	int nid;

	for_each_node(nid) {
		struct lruvec *child_lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
		struct lruvec *parent_lruvec = mem_cgroup_lruvec(parent, NODE_DATA(nid));
		unsigned long value = lruvec_page_state_local(child_lruvec, idx);
		struct mem_cgroup_per_node *child_pn, *parent_pn;

		child_pn = container_of(child_lruvec, struct mem_cgroup_per_node, lruvec);
		parent_pn = container_of(parent_lruvec, struct mem_cgroup_per_node, lruvec);

		__mod_memcg_lruvec_state(child_pn, idx, -value);
		__mod_memcg_lruvec_state(parent_pn, idx, value);
	}
}
#endif

/* Subset of vm_event_item to report for memcg event stats */
static const unsigned int memcg_vm_event_stat[] = {
#ifdef CONFIG_MEMCG_V1
	PGPGIN,
	PGPGOUT,
#endif
	PSWPIN,
	PSWPOUT,
	PGFAULT,
	PGMAJFAULT,
	PGACTIVATE,
	PGDEACTIVATE,
	PGLAZYFREE,
	PGLAZYFREED,
#ifdef CONFIG_SWAP
	SWPIN_ZERO,
	SWPOUT_ZERO,
#endif
#ifdef CONFIG_ZSWAP
	ZSWPIN,
	ZSWPOUT,
	ZSWPWB,
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	THP_FAULT_ALLOC,
	THP_COLLAPSE_ALLOC,
	THP_SWPOUT,
	THP_SWPOUT_FALLBACK,
#endif
#ifdef CONFIG_NUMA_BALANCING
	NUMA_PAGE_MIGRATE,
	NUMA_PTE_UPDATES,
	NUMA_HINT_FAULTS,
#endif
};

#define NR_MEMCG_EVENTS ARRAY_SIZE(memcg_vm_event_stat)
static u8 mem_cgroup_events_index[NR_VM_EVENT_ITEMS] __read_mostly;

static void init_memcg_events(void)
{
	u8 i;

	BUILD_BUG_ON(NR_VM_EVENT_ITEMS >= U8_MAX);

	memset(mem_cgroup_events_index, U8_MAX,
	       sizeof(mem_cgroup_events_index));

	for (i = 0; i < NR_MEMCG_EVENTS; ++i)
		mem_cgroup_events_index[memcg_vm_event_stat[i]] = i;
}

static inline int memcg_events_index(enum vm_event_item idx)
{
	return mem_cgroup_events_index[idx];
}

struct memcg_vmstats_percpu {
	/* Stats updates since the last flush */
	unsigned long stats_updates;

	/* Cached pointers for fast iteration in memcg_rstat_updated() */
	struct memcg_vmstats_percpu __percpu *parent_pcpu;
	struct memcg_vmstats *vmstats;

	/* The above should fit a single cacheline for memcg_rstat_updated() */

	/* Local (CPU and cgroup) page state & events */
	long state[MEMCG_VMSTAT_SIZE];
	unsigned long events[NR_MEMCG_EVENTS];

	/* Delta calculation for lockless upward propagation */
	long state_prev[MEMCG_VMSTAT_SIZE];
	unsigned long events_prev[NR_MEMCG_EVENTS];
} ____cacheline_aligned;

struct memcg_vmstats {
	/* Aggregated (CPU and subtree) page state & events */
	long state[MEMCG_VMSTAT_SIZE];
	unsigned long events[NR_MEMCG_EVENTS];

	/* Non-hierarchical (CPU aggregated) page state & events */
	long state_local[MEMCG_VMSTAT_SIZE];
	unsigned long events_local[NR_MEMCG_EVENTS];

	/* Pending child counts during tree propagation */
	long state_pending[MEMCG_VMSTAT_SIZE];
	unsigned long events_pending[NR_MEMCG_EVENTS];

	/* Stats updates since the last flush */
	atomic_long_t stats_updates;
};

/*
 * memcg and lruvec stats flushing
 *
 * Many codepaths leading to stats update or read are performance sensitive,
 * and adding stats flushing to such codepaths is not desirable. So, to
 * optimize flushing, the kernel does:
 *
 * 1) Periodically and asynchronously flush the stats every 2 seconds so that
 *    the rstat update tree does not grow unbounded.
 *
 * 2) Flush the stats synchronously on the reader side only when there are
 *    more than (MEMCG_CHARGE_BATCH * nr_cpus) update events. This lets the
 *    stats be out of sync by at most (MEMCG_CHARGE_BATCH * nr_cpus), but
 *    only for 2 seconds due to (1).
 */
static void flush_memcg_stats_dwork(struct work_struct *w);
static DECLARE_DEFERRABLE_WORK(stats_flush_dwork, flush_memcg_stats_dwork);
static u64 flush_last_time;

#define FLUSH_TIME (2UL*HZ)

static bool memcg_vmstats_needs_flush(struct memcg_vmstats *vmstats)
{
	return atomic_long_read(&vmstats->stats_updates) >
		MEMCG_CHARGE_BATCH * num_online_cpus();
}
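
/*
 * Example: with MEMCG_CHARGE_BATCH (currently 64) on a 16-CPU machine, a
 * subtree becomes flushable once it has accumulated more than
 * 64 * 16 = 1024 pending page-sized stat updates since the last flush;
 * below that, readers tolerate the staleness and skip the rstat flush.
 */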

static inline void memcg_rstat_updated(struct mem_cgroup *memcg, long val,
				       int cpu)
{
	struct memcg_vmstats_percpu __percpu *statc_pcpu;
	struct memcg_vmstats_percpu *statc;
	unsigned long stats_updates;

	if (!val)
		return;

	css_rstat_updated(&memcg->css, cpu);
	statc_pcpu = memcg->vmstats_percpu;
	for (; statc_pcpu; statc_pcpu = statc->parent_pcpu) {
		statc = this_cpu_ptr(statc_pcpu);
		/*
		 * If @memcg is already flushable then all its ancestors are
		 * flushable as well and also there is no need to increase
		 * stats_updates.
		 */
		if (memcg_vmstats_needs_flush(statc->vmstats))
			break;

		stats_updates = this_cpu_add_return(statc_pcpu->stats_updates,
						    abs(val));
		if (stats_updates < MEMCG_CHARGE_BATCH)
			continue;

		stats_updates = this_cpu_xchg(statc_pcpu->stats_updates, 0);
		atomic_long_add(stats_updates, &statc->vmstats->stats_updates);
	}
}

static void __mem_cgroup_flush_stats(struct mem_cgroup *memcg, bool force)
{
	bool needs_flush = memcg_vmstats_needs_flush(memcg->vmstats);

	trace_memcg_flush_stats(memcg, atomic_long_read(&memcg->vmstats->stats_updates),
				force, needs_flush);

	if (!force && !needs_flush)
		return;

	if (mem_cgroup_is_root(memcg))
		WRITE_ONCE(flush_last_time, jiffies_64);

	css_rstat_flush(&memcg->css);
}

/*
 * mem_cgroup_flush_stats - flush the stats of a memory cgroup subtree
 * @memcg: root of the subtree to flush
 *
 * Flushing is serialized by the underlying global rstat lock. There is also a
 * minimum amount of work to be done even if there are no stat updates to flush.
 * Hence, we only flush the stats if the updates delta exceeds a threshold. This
 * avoids unnecessary work and contention on the underlying lock.
 */
void mem_cgroup_flush_stats(struct mem_cgroup *memcg)
{
	if (mem_cgroup_disabled())
		return;

	if (!memcg)
		memcg = root_mem_cgroup;

	__mem_cgroup_flush_stats(memcg, false);
}

void mem_cgroup_flush_stats_ratelimited(struct mem_cgroup *memcg)
{
	/* Only flush if the periodic flusher is one full cycle late */
	if (time_after64(jiffies_64, READ_ONCE(flush_last_time) + 2*FLUSH_TIME))
		mem_cgroup_flush_stats(memcg);
}

static void flush_memcg_stats_dwork(struct work_struct *w)
{
	/*
	 * Deliberately ignore memcg_vmstats_needs_flush() here so that flushing
	 * in latency-sensitive paths is as cheap as possible.
	 */
	__mem_cgroup_flush_stats(root_mem_cgroup, true);
	queue_delayed_work(system_dfl_wq, &stats_flush_dwork, FLUSH_TIME);
}

unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
{
	long x;
	int i = memcg_stats_index(idx);

	if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx))
		return 0;

	x = READ_ONCE(memcg->vmstats->state[i]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

bool memcg_stat_item_valid(int idx)
{
	if ((u32)idx >= MEMCG_NR_STAT)
		return false;

	return !BAD_STAT_IDX(memcg_stats_index(idx));
}

static int memcg_page_state_unit(int item);

/*
 * Normalize the value passed into memcg_rstat_updated() to be in pages.
 * Non-zero sub-page updates are rounded up to 1 page, since zero-valued
 * updates are ignored.
 */
static long memcg_state_val_in_pages(int idx, long val)
{
	int unit = memcg_page_state_unit(idx);
	long res;

	if (!val || unit == PAGE_SIZE)
		return val;

	/* Get the absolute value of (val * unit / PAGE_SIZE). */
	res = mult_frac(abs(val), unit, PAGE_SIZE);
	/* Round zero results up to 1 page. */
	res = res ? : 1;

	return val < 0 ? -res : res;
}
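
/*
 * Example: NR_SLAB_RECLAIMABLE_B is tracked in bytes (unit == 1). On a
 * 4K-page system, val = 300 gives mult_frac(300, 1, 4096) == 0, which is
 * rounded up to 1 page so the update still counts toward the flush
 * threshold; val = -8192 maps to -2 pages. NR_KERNEL_STACK_KB
 * (unit == SZ_1K) behaves analogously.
 */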

#ifdef CONFIG_MEMCG_V1
/*
 * Used in mod_memcg_state() and mod_memcg_lruvec_state() to avoid race with
 * reparenting of non-hierarchical state_locals.
 */
static inline struct mem_cgroup *get_non_dying_memcg_start(struct mem_cgroup *memcg)
{
	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
		return memcg;

	rcu_read_lock();

	while (memcg_is_dying(memcg))
		memcg = parent_mem_cgroup(memcg);

	return memcg;
}

static inline void get_non_dying_memcg_end(void)
{
	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
		return;

	rcu_read_unlock();
}
#else
static inline struct mem_cgroup *get_non_dying_memcg_start(struct mem_cgroup *memcg)
{
	return memcg;
}

static inline void get_non_dying_memcg_end(void)
{
}
#endif

static void __mod_memcg_state(struct mem_cgroup *memcg,
			      enum memcg_stat_item idx, long val)
{
	int i = memcg_stats_index(idx);
	int cpu;

	if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx))
		return;

	cpu = get_cpu();

	this_cpu_add(memcg->vmstats_percpu->state[i], val);
	val = memcg_state_val_in_pages(idx, val);
	memcg_rstat_updated(memcg, val, cpu);

	trace_mod_memcg_state(memcg, idx, val);

	put_cpu();
}

/**
 * mod_memcg_state - update cgroup memory statistics
 * @memcg: the memory cgroup
 * @idx: the stat item - can be enum memcg_stat_item or enum node_stat_item
 * @val: delta to add to the counter, can be negative
 */
void mod_memcg_state(struct mem_cgroup *memcg, enum memcg_stat_item idx,
		     int val)
{
	if (mem_cgroup_disabled())
		return;

	memcg = get_non_dying_memcg_start(memcg);
	__mod_memcg_state(memcg, idx, val);
	get_non_dying_memcg_end();
}

#ifdef CONFIG_MEMCG_V1
/* idx can be of type enum memcg_stat_item or node_stat_item. */
unsigned long memcg_page_state_local(struct mem_cgroup *memcg, int idx)
{
	long x;
	int i = memcg_stats_index(idx);

	if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx))
		return 0;

	x = READ_ONCE(memcg->vmstats->state_local[i]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

void reparent_memcg_state_local(struct mem_cgroup *memcg,
				struct mem_cgroup *parent, int idx)
{
	unsigned long value = memcg_page_state_local(memcg, idx);

	__mod_memcg_state(memcg, idx, -value);
	__mod_memcg_state(parent, idx, value);
}
#endif

static void __mod_memcg_lruvec_state(struct mem_cgroup_per_node *pn,
				     enum node_stat_item idx, long val)
{
	struct mem_cgroup *memcg = pn->memcg;
	int i = memcg_stats_index(idx);
	int cpu;

	if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx))
		return;

	cpu = get_cpu();

	/* Update memcg */
	this_cpu_add(memcg->vmstats_percpu->state[i], val);

	/* Update lruvec */
	this_cpu_add(pn->lruvec_stats_percpu->state[i], val);

	val = memcg_state_val_in_pages(idx, val);
	memcg_rstat_updated(memcg, val, cpu);
	trace_mod_memcg_lruvec_state(memcg, idx, val);

	put_cpu();
}

static void mod_memcg_lruvec_state(struct lruvec *lruvec,
				   enum node_stat_item idx,
				   int val)
{
	struct pglist_data *pgdat = lruvec_pgdat(lruvec);
	struct mem_cgroup_per_node *pn;
	struct mem_cgroup *memcg;

	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	memcg = get_non_dying_memcg_start(pn->memcg);
	pn = memcg->nodeinfo[pgdat->node_id];

	__mod_memcg_lruvec_state(pn, idx, val);

	get_non_dying_memcg_end();
}

/**
 * mod_lruvec_state - update lruvec memory statistics
 * @lruvec: the lruvec
 * @idx: the stat item
 * @val: delta to add to the counter, can be negative
 *
 * The lruvec is the intersection of the NUMA node and a cgroup. This
 * function updates all three counters that are affected by a change of
 * state at this level: per-node, per-cgroup, per-lruvec.
 */
void mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
		      int val)
{
	/* Update node */
	mod_node_page_state(lruvec_pgdat(lruvec), idx, val);

	/* Update memcg and lruvec */
	if (!mem_cgroup_disabled())
		mod_memcg_lruvec_state(lruvec, idx, val);
}

void lruvec_stat_mod_folio(struct folio *folio, enum node_stat_item idx,
			   int val)
{
	struct mem_cgroup *memcg;
	pg_data_t *pgdat = folio_pgdat(folio);
	struct lruvec *lruvec;

	rcu_read_lock();
	memcg = folio_memcg(folio);
	/* Untracked pages have no memcg, no lruvec. Update only the node */
	if (!memcg) {
		rcu_read_unlock();
		mod_node_page_state(pgdat, idx, val);
		return;
	}

	lruvec = mem_cgroup_lruvec(memcg, pgdat);
	mod_lruvec_state(lruvec, idx, val);
	rcu_read_unlock();
}
EXPORT_SYMBOL(lruvec_stat_mod_folio);

void mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val)
{
	pg_data_t *pgdat = page_pgdat(virt_to_page(p));
	struct mem_cgroup *memcg;
	struct lruvec *lruvec;

	rcu_read_lock();
	memcg = mem_cgroup_from_virt(p);

	/*
	 * Untracked pages have no memcg, no lruvec. Update only the
	 * node. If we reparent the slab objects to the root memcg,
	 * when we free the slab object, we need to update the per-memcg
	 * vmstats to keep it correct for the root memcg.
	 */
	if (!memcg) {
		mod_node_page_state(pgdat, idx, val);
	} else {
		lruvec = mem_cgroup_lruvec(memcg, pgdat);
		mod_lruvec_state(lruvec, idx, val);
	}
	rcu_read_unlock();
}

/**
 * count_memcg_events - account VM events in a cgroup
 * @memcg: the memory cgroup
 * @idx: the event item
 * @count: the number of events that occurred
 */
void count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
			unsigned long count)
{
	int i = memcg_events_index(idx);
	int cpu;

	if (mem_cgroup_disabled())
		return;

	if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx))
		return;

	cpu = get_cpu();

	this_cpu_add(memcg->vmstats_percpu->events[i], count);
	memcg_rstat_updated(memcg, count, cpu);
	trace_count_memcg_events(memcg, idx, count);

	put_cpu();
}

unsigned long memcg_events(struct mem_cgroup *memcg, int event)
{
	int i = memcg_events_index(event);

	if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, event))
		return 0;

	return READ_ONCE(memcg->vmstats->events[i]);
}

bool memcg_vm_event_item_valid(enum vm_event_item idx)
{
	if (idx >= NR_VM_EVENT_ITEMS)
		return false;

	return !BAD_STAT_IDX(memcg_events_index(idx));
}

#ifdef CONFIG_MEMCG_V1
unsigned long memcg_events_local(struct mem_cgroup *memcg, int event)
{
	int i = memcg_events_index(event);

	if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, event))
		return 0;

	return READ_ONCE(memcg->vmstats->events_local[i]);
}
#endif

struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
{
	/*
	 * mm_update_next_owner() may clear mm->owner to NULL
	 * if it races with swapoff, page migration, etc.
	 * So this can be called with p == NULL.
	 */
	if (unlikely(!p))
		return NULL;

	return mem_cgroup_from_css(task_css(p, memory_cgrp_id));
}
EXPORT_SYMBOL(mem_cgroup_from_task);

static __always_inline struct mem_cgroup *active_memcg(void)
{
	if (!in_task())
		return this_cpu_read(int_active_memcg);
	else
		return current->active_memcg;
}

/**
 * get_mem_cgroup_from_mm: Obtain a reference on given mm_struct's memcg.
 * @mm: mm from which memcg should be extracted. It can be NULL.
 *
 * Obtain a reference on @mm's memcg and return it if successful. If @mm
 * is NULL, then the memcg is chosen as follows:
 * 1) The active memcg, if set.
 * 2) current->mm->memcg, if available.
 * 3) The root memcg.
 * If mem_cgroup is disabled, NULL is returned.
 */
struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return NULL;

	/*
	 * Page cache insertions can happen without an
	 * actual mm context, e.g. during disk probing
	 * on boot, loopback IO, acct() writes etc.
	 *
	 * No need to css_get on root memcg as the reference
	 * counting is disabled on the root level in the
	 * cgroup core. See CSS_NO_REF.
	 */
	if (unlikely(!mm)) {
		memcg = active_memcg();
		if (unlikely(memcg)) {
			/* remote memcg must hold a ref */
			css_get(&memcg->css);
			return memcg;
		}
		mm = current->mm;
		if (unlikely(!mm))
			return root_mem_cgroup;
	}

	rcu_read_lock();
	do {
		memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
		if (unlikely(!memcg))
			memcg = root_mem_cgroup;
	} while (!css_tryget(&memcg->css));
	rcu_read_unlock();
	return memcg;
}
EXPORT_SYMBOL(get_mem_cgroup_from_mm);
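
/*
 * Usage sketch: callers pair this with css_put() once they are done with
 * the returned memcg. css_put() on the root memcg is a no-op because its
 * css carries CSS_NO_REF.
 *
 *	struct mem_cgroup *memcg = get_mem_cgroup_from_mm(current->mm);
 *
 *	if (memcg) {
 *		... charge against or inspect memcg ...
 *		css_put(&memcg->css);
 *	}
 */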

/**
 * get_mem_cgroup_from_current - Obtain a reference on current task's memcg.
 */
struct mem_cgroup *get_mem_cgroup_from_current(void)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return NULL;

again:
	rcu_read_lock();
	memcg = mem_cgroup_from_task(current);
	if (!css_tryget(&memcg->css)) {
		rcu_read_unlock();
		goto again;
	}
	rcu_read_unlock();
	return memcg;
}

/**
 * get_mem_cgroup_from_folio - Obtain a reference on a given folio's memcg.
 * @folio: folio from which memcg should be extracted.
 *
 * See folio_memcg() for folio->objcg/memcg binding rules.
 */
struct mem_cgroup *get_mem_cgroup_from_folio(struct folio *folio)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return NULL;

	if (!folio_memcg_charged(folio))
		return root_mem_cgroup;

	rcu_read_lock();
	do {
		memcg = folio_memcg(folio);
	} while (unlikely(!css_tryget(&memcg->css)));
	rcu_read_unlock();
	return memcg;
}

/**
 * mem_cgroup_iter - iterate over memory cgroup hierarchy
 * @root: hierarchy root
 * @prev: previously returned memcg, NULL on first invocation
 * @reclaim: cookie for shared reclaim walks, NULL for full walks
 *
 * Returns references to children of the hierarchy below @root, or
 * @root itself, or %NULL after a full round-trip.
 *
 * Caller must pass the return value in @prev on subsequent
 * invocations for reference counting, or use mem_cgroup_iter_break()
 * to cancel a hierarchy walk before the round-trip is complete.
 *
 * Reclaimers can specify a node in @reclaim to divide up the memcgs
 * in the hierarchy among all concurrent reclaimers operating on the
 * same node.
 */
struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
				   struct mem_cgroup *prev,
				   struct mem_cgroup_reclaim_cookie *reclaim)
{
	struct mem_cgroup_reclaim_iter *iter;
	struct cgroup_subsys_state *css;
	struct mem_cgroup *pos;
	struct mem_cgroup *next;

	if (mem_cgroup_disabled())
		return NULL;

	if (!root)
		root = root_mem_cgroup;

	rcu_read_lock();
restart:
	next = NULL;

	if (reclaim) {
		int gen;
		int nid = reclaim->pgdat->node_id;

		iter = &root->nodeinfo[nid]->iter;
		gen = atomic_read(&iter->generation);

		/*
		 * On start, join the current reclaim iteration cycle.
		 * Exit when a concurrent walker completes it.
		 */
		if (!prev)
			reclaim->generation = gen;
		else if (reclaim->generation != gen)
			goto out_unlock;

		pos = READ_ONCE(iter->position);
	} else
		pos = prev;

	css = pos ? &pos->css : NULL;

	while ((css = css_next_descendant_pre(css, &root->css))) {
		/*
		 * Verify the css and acquire a reference. The root
		 * is provided by the caller, so we know it's alive
		 * and kicking, and don't take an extra reference.
		 */
		if (css == &root->css || css_tryget(css))
			break;
	}

	next = mem_cgroup_from_css(css);

	if (reclaim) {
		/*
		 * The position could have already been updated by a competing
		 * thread, so check that the value hasn't changed since we read
		 * it to avoid reclaiming from the same cgroup twice.
		 */
		if (cmpxchg(&iter->position, pos, next) != pos) {
			if (css && css != &root->css)
				css_put(css);
			goto restart;
		}

		if (!next) {
			atomic_inc(&iter->generation);

			/*
			 * Reclaimers share the hierarchy walk, and a
			 * new one might jump in right at the end of
			 * the hierarchy - make sure they see at least
			 * one group and restart from the beginning.
			 */
			if (!prev)
				goto restart;
		}
	}

out_unlock:
	rcu_read_unlock();
	if (prev && prev != root)
		css_put(&prev->css);

	return next;
}
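
/*
 * Typical full-walk usage (a sketch; this is the idiom behind the
 * for_each_mem_cgroup_tree() helper used elsewhere in this file):
 *
 *	struct mem_cgroup *iter;
 *
 *	for (iter = mem_cgroup_iter(root, NULL, NULL);
 *	     iter != NULL;
 *	     iter = mem_cgroup_iter(root, iter, NULL)) {
 *		... visit iter ...
 *	}
 *
 * Bailing out of the loop early requires mem_cgroup_iter_break(root, iter)
 * to drop the reference held on the last returned memcg.
 */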

/**
 * mem_cgroup_iter_break - abort a hierarchy walk prematurely
 * @root: hierarchy root
 * @prev: last visited hierarchy member as returned by mem_cgroup_iter()
 */
void mem_cgroup_iter_break(struct mem_cgroup *root,
			   struct mem_cgroup *prev)
{
	if (!root)
		root = root_mem_cgroup;
	if (prev && prev != root)
		css_put(&prev->css);
}

static void __invalidate_reclaim_iterators(struct mem_cgroup *from,
					   struct mem_cgroup *dead_memcg)
{
	struct mem_cgroup_reclaim_iter *iter;
	struct mem_cgroup_per_node *mz;
	int nid;

	for_each_node(nid) {
		mz = from->nodeinfo[nid];
		iter = &mz->iter;
		cmpxchg(&iter->position, dead_memcg, NULL);
	}
}

static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
{
	struct mem_cgroup *memcg = dead_memcg;
	struct mem_cgroup *last;

	do {
		__invalidate_reclaim_iterators(memcg, dead_memcg);
		last = memcg;
	} while ((memcg = parent_mem_cgroup(memcg)));

	/*
	 * When cgroup1 non-hierarchy mode is used,
	 * parent_mem_cgroup() does not walk all the way up to the
	 * cgroup root (root_mem_cgroup). So we have to handle
	 * dead_memcg from cgroup root separately.
	 */
	if (!mem_cgroup_is_root(last))
		__invalidate_reclaim_iterators(root_mem_cgroup,
					       dead_memcg);
}

/**
 * mem_cgroup_scan_tasks - iterate over tasks of a memory cgroup hierarchy
 * @memcg: hierarchy root
 * @fn: function to call for each task
 * @arg: argument passed to @fn
 *
 * This function iterates over tasks attached to @memcg or to any of its
 * descendants and calls @fn for each task. If @fn returns a non-zero
 * value, the function breaks the iteration loop. Otherwise, it will iterate
 * over all tasks and return 0.
 *
 * This function must not be called for the root memory cgroup.
 */
void mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
			   int (*fn)(struct task_struct *, void *), void *arg)
{
	struct mem_cgroup *iter;
	int ret = 0;

	BUG_ON(mem_cgroup_is_root(memcg));

	for_each_mem_cgroup_tree(iter, memcg) {
		struct css_task_iter it;
		struct task_struct *task;

		css_task_iter_start(&iter->css, CSS_TASK_ITER_PROCS, &it);
		while (!ret && (task = css_task_iter_next(&it))) {
			ret = fn(task, arg);
			/* Avoid potential softlockup warning */
			cond_resched();
		}
		css_task_iter_end(&it);
		if (ret) {
			mem_cgroup_iter_break(memcg, iter);
			break;
		}
	}
}
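
/*
 * Usage sketch with a hypothetical callback (the real callers live in the
 * OOM killer, e.g. oom_evaluate_task()):
 *
 *	static int count_one(struct task_struct *task, void *arg)
 *	{
 *		(*(unsigned long *)arg)++;
 *		return 0;	// a non-zero return would stop the walk
 *	}
 *
 *	unsigned long nr_tasks = 0;
 *
 *	mem_cgroup_scan_tasks(memcg, count_one, &nr_tasks);
 */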

/**
 * folio_lruvec_lock - Lock the lruvec for a folio.
 * @folio: Pointer to the folio.
 *
 * These functions are safe to use under any of the following conditions:
 * - folio locked
 * - folio_test_lru false
 * - folio frozen (refcount of 0)
 *
 * Return: The lruvec this folio is on with its lock held and rcu read lock held.
 */
struct lruvec *folio_lruvec_lock(struct folio *folio)
{
	struct lruvec *lruvec;

	rcu_read_lock();
retry:
	lruvec = folio_lruvec(folio);
	spin_lock(&lruvec->lru_lock);
	if (unlikely(lruvec_memcg(lruvec) != folio_memcg(folio))) {
		spin_unlock(&lruvec->lru_lock);
		goto retry;
	}

	return lruvec;
}
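
/*
 * Pairing sketch (assuming one of the stability conditions above holds for
 * the folio). The unlock side must drop both lru_lock and the RCU read
 * lock taken here; in-tree callers typically use the matching lruvec
 * unlock helpers rather than open-coding this:
 *
 *	struct lruvec *lruvec = folio_lruvec_lock(folio);
 *
 *	... add or remove the folio on the LRU lists ...
 *
 *	spin_unlock(&lruvec->lru_lock);
 *	rcu_read_unlock();
 */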

/**
 * folio_lruvec_lock_irq - Lock the lruvec for a folio.
 * @folio: Pointer to the folio.
 *
 * These functions are safe to use under any of the following conditions:
 * - folio locked
 * - folio_test_lru false
 * - folio frozen (refcount of 0)
 *
 * Return: The lruvec this folio is on with its lock held and interrupts
 * disabled and rcu read lock held.
 */
struct lruvec *folio_lruvec_lock_irq(struct folio *folio)
{
	struct lruvec *lruvec;

	rcu_read_lock();
retry:
	lruvec = folio_lruvec(folio);
	spin_lock_irq(&lruvec->lru_lock);
	if (unlikely(lruvec_memcg(lruvec) != folio_memcg(folio))) {
		spin_unlock_irq(&lruvec->lru_lock);
		goto retry;
	}

	return lruvec;
}

/**
 * folio_lruvec_lock_irqsave - Lock the lruvec for a folio.
 * @folio: Pointer to the folio.
 * @flags: Pointer to irqsave flags.
 *
 * These functions are safe to use under any of the following conditions:
 * - folio locked
 * - folio_test_lru false
 * - folio frozen (refcount of 0)
 *
 * Return: The lruvec this folio is on with its lock held and interrupts
 * disabled and rcu read lock held.
 */
struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio,
					 unsigned long *flags)
{
	struct lruvec *lruvec;

	rcu_read_lock();
retry:
	lruvec = folio_lruvec(folio);
	spin_lock_irqsave(&lruvec->lru_lock, *flags);
	if (unlikely(lruvec_memcg(lruvec) != folio_memcg(folio))) {
		spin_unlock_irqrestore(&lruvec->lru_lock, *flags);
		goto retry;
	}

	return lruvec;
}

/**
 * mem_cgroup_update_lru_size - account for adding or removing an lru page
 * @lruvec: mem_cgroup per zone lru vector
 * @lru: index of lru list the page is sitting on
 * @zid: zone id of the accounted pages
 * @nr_pages: positive when adding or negative when removing
 *
 * This function must be called under lru_lock, just before a page is added
 * to or just after a page is removed from an lru list.
 */
void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
				int zid, long nr_pages)
{
	struct mem_cgroup_per_node *mz;
	unsigned long *lru_size;
	long size;

	if (mem_cgroup_disabled())
		return;

	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	lru_size = &mz->lru_zone_size[zid][lru];

	if (nr_pages < 0)
		*lru_size += nr_pages;

	size = *lru_size;
	if (WARN_ONCE(size < 0,
		      "%s(%p, %d, %ld): lru_size %ld\n",
		      __func__, lruvec, lru, nr_pages, size)) {
		VM_BUG_ON(1);
		*lru_size = 0;
	}

	if (nr_pages > 0)
		*lru_size += nr_pages;
}

/**
 * mem_cgroup_margin - calculate chargeable space of a memory cgroup
 * @memcg: the memory cgroup
 *
 * Returns the maximum amount of memory @memcg can be charged with, in
 * pages.
 */
static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
{
	unsigned long margin = 0;
	unsigned long count;
	unsigned long limit;

	count = page_counter_read(&memcg->memory);
	limit = READ_ONCE(memcg->memory.max);
	if (count < limit)
		margin = limit - count;

	if (do_memsw_account()) {
		count = page_counter_read(&memcg->memsw);
		limit = READ_ONCE(memcg->memsw.max);
		if (count < limit)
			margin = min(margin, limit - count);
		else
			margin = 0;
	}

	return margin;
}
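
/*
 * Example: memory.max = 1000 pages with usage 900 leaves a margin of 100.
 * If memsw accounting is on and memsw.max = 1100 with memsw usage 1050,
 * the memsw headroom is only 50, so the margin reported is
 * min(100, 50) = 50 pages.
 */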

struct memory_stat {
	const char *name;
	unsigned int idx;
};

static const struct memory_stat memory_stats[] = {
	{ "anon", NR_ANON_MAPPED },
	{ "file", NR_FILE_PAGES },
	{ "kernel", MEMCG_KMEM },
	{ "kernel_stack", NR_KERNEL_STACK_KB },
	{ "pagetables", NR_PAGETABLE },
	{ "sec_pagetables", NR_SECONDARY_PAGETABLE },
	{ "percpu", MEMCG_PERCPU_B },
	{ "sock", MEMCG_SOCK },
	{ "vmalloc", NR_VMALLOC },
	{ "shmem", NR_SHMEM },
#ifdef CONFIG_ZSWAP
	{ "zswap", MEMCG_ZSWAP_B },
	{ "zswapped", MEMCG_ZSWAPPED },
	{ "zswap_incomp", MEMCG_ZSWAP_INCOMP },
#endif
	{ "file_mapped", NR_FILE_MAPPED },
	{ "file_dirty", NR_FILE_DIRTY },
	{ "file_writeback", NR_WRITEBACK },
#ifdef CONFIG_SWAP
	{ "swapcached", NR_SWAPCACHE },
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	{ "anon_thp", NR_ANON_THPS },
	{ "file_thp", NR_FILE_THPS },
	{ "shmem_thp", NR_SHMEM_THPS },
#endif
	{ "inactive_anon", NR_INACTIVE_ANON },
	{ "active_anon", NR_ACTIVE_ANON },
	{ "inactive_file", NR_INACTIVE_FILE },
	{ "active_file", NR_ACTIVE_FILE },
	{ "unevictable", NR_UNEVICTABLE },
	{ "slab_reclaimable", NR_SLAB_RECLAIMABLE_B },
	{ "slab_unreclaimable", NR_SLAB_UNRECLAIMABLE_B },
#ifdef CONFIG_HUGETLB_PAGE
	{ "hugetlb", NR_HUGETLB },
#endif

	/* The memory events */
	{ "workingset_refault_anon", WORKINGSET_REFAULT_ANON },
	{ "workingset_refault_file", WORKINGSET_REFAULT_FILE },
	{ "workingset_activate_anon", WORKINGSET_ACTIVATE_ANON },
	{ "workingset_activate_file", WORKINGSET_ACTIVATE_FILE },
	{ "workingset_restore_anon", WORKINGSET_RESTORE_ANON },
	{ "workingset_restore_file", WORKINGSET_RESTORE_FILE },
	{ "workingset_nodereclaim", WORKINGSET_NODERECLAIM },

	{ "pgdemote_kswapd", PGDEMOTE_KSWAPD },
	{ "pgdemote_direct", PGDEMOTE_DIRECT },
	{ "pgdemote_khugepaged", PGDEMOTE_KHUGEPAGED },
	{ "pgdemote_proactive", PGDEMOTE_PROACTIVE },
	{ "pgsteal_kswapd", PGSTEAL_KSWAPD },
	{ "pgsteal_direct", PGSTEAL_DIRECT },
	{ "pgsteal_khugepaged", PGSTEAL_KHUGEPAGED },
	{ "pgsteal_proactive", PGSTEAL_PROACTIVE },
	{ "pgscan_kswapd", PGSCAN_KSWAPD },
	{ "pgscan_direct", PGSCAN_DIRECT },
	{ "pgscan_khugepaged", PGSCAN_KHUGEPAGED },
	{ "pgscan_proactive", PGSCAN_PROACTIVE },
	{ "pgrefill", PGREFILL },
#ifdef CONFIG_NUMA_BALANCING
	{ "pgpromote_success", PGPROMOTE_SUCCESS },
#endif
};

/* The actual unit of the state item, not the same as the output unit */
static int memcg_page_state_unit(int item)
{
	switch (item) {
	case MEMCG_PERCPU_B:
	case MEMCG_ZSWAP_B:
	case NR_SLAB_RECLAIMABLE_B:
	case NR_SLAB_UNRECLAIMABLE_B:
		return 1;
	case NR_KERNEL_STACK_KB:
		return SZ_1K;
	default:
		return PAGE_SIZE;
	}
}

/* Translate stat items to the correct unit for memory.stat output */
static int memcg_page_state_output_unit(int item)
{
	/*
	 * Workingset state is actually in pages, but we export it to userspace
	 * as a scalar count of events, so special case it here.
	 *
	 * Demotion and promotion activities are exported in pages, consistent
	 * with their global counterparts.
	 */
	switch (item) {
	case WORKINGSET_REFAULT_ANON:
	case WORKINGSET_REFAULT_FILE:
	case WORKINGSET_ACTIVATE_ANON:
	case WORKINGSET_ACTIVATE_FILE:
	case WORKINGSET_RESTORE_ANON:
	case WORKINGSET_RESTORE_FILE:
	case WORKINGSET_NODERECLAIM:
	case PGDEMOTE_KSWAPD:
	case PGDEMOTE_DIRECT:
	case PGDEMOTE_KHUGEPAGED:
	case PGDEMOTE_PROACTIVE:
	case PGSTEAL_KSWAPD:
	case PGSTEAL_DIRECT:
	case PGSTEAL_KHUGEPAGED:
	case PGSTEAL_PROACTIVE:
	case PGSCAN_KSWAPD:
	case PGSCAN_DIRECT:
	case PGSCAN_KHUGEPAGED:
	case PGSCAN_PROACTIVE:
	case PGREFILL:
#ifdef CONFIG_NUMA_BALANCING
	case PGPROMOTE_SUCCESS:
#endif
		return 1;
	default:
		return memcg_page_state_unit(item);
	}
}

unsigned long memcg_page_state_output(struct mem_cgroup *memcg, int item)
{
	return memcg_page_state(memcg, item) *
		memcg_page_state_output_unit(item);
}

#ifdef CONFIG_MEMCG_V1
unsigned long memcg_page_state_local_output(struct mem_cgroup *memcg, int item)
{
	return memcg_page_state_local(memcg, item) *
		memcg_page_state_output_unit(item);
}
#endif

#ifdef CONFIG_HUGETLB_PAGE
static bool memcg_accounts_hugetlb(void)
{
	return cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_HUGETLB_ACCOUNTING;
}
#else /* CONFIG_HUGETLB_PAGE */
static bool memcg_accounts_hugetlb(void)
{
	return false;
}
#endif /* CONFIG_HUGETLB_PAGE */

static void memcg_stat_format(struct mem_cgroup *memcg, struct seq_buf *s)
{
	int i;

	/*
	 * Provide statistics on the state of the memory subsystem as
	 * well as cumulative event counters that show past behavior.
	 *
	 * This list is ordered following a combination of these gradients:
	 * 1) generic big picture -> specifics and details
	 * 2) reflecting userspace activity -> reflecting kernel heuristics
	 *
	 * Current memory state:
	 */
	mem_cgroup_flush_stats(memcg);

	for (i = 0; i < ARRAY_SIZE(memory_stats); i++) {
		u64 size;

#ifdef CONFIG_HUGETLB_PAGE
		if (unlikely(memory_stats[i].idx == NR_HUGETLB) &&
		    !memcg_accounts_hugetlb())
			continue;
#endif
		size = memcg_page_state_output(memcg, memory_stats[i].idx);
		seq_buf_printf(s, "%s %llu\n", memory_stats[i].name, size);

		if (unlikely(memory_stats[i].idx == NR_SLAB_UNRECLAIMABLE_B)) {
			size += memcg_page_state_output(memcg,
							NR_SLAB_RECLAIMABLE_B);
			seq_buf_printf(s, "slab %llu\n", size);
		}
	}

	/* Accumulated memory events */
	seq_buf_printf(s, "pgscan %lu\n",
		       memcg_page_state(memcg, PGSCAN_KSWAPD) +
		       memcg_page_state(memcg, PGSCAN_DIRECT) +
		       memcg_page_state(memcg, PGSCAN_PROACTIVE) +
		       memcg_page_state(memcg, PGSCAN_KHUGEPAGED));
	seq_buf_printf(s, "pgsteal %lu\n",
		       memcg_page_state(memcg, PGSTEAL_KSWAPD) +
		       memcg_page_state(memcg, PGSTEAL_DIRECT) +
		       memcg_page_state(memcg, PGSTEAL_PROACTIVE) +
		       memcg_page_state(memcg, PGSTEAL_KHUGEPAGED));

	for (i = 0; i < ARRAY_SIZE(memcg_vm_event_stat); i++) {
#ifdef CONFIG_MEMCG_V1
		if (memcg_vm_event_stat[i] == PGPGIN ||
		    memcg_vm_event_stat[i] == PGPGOUT)
			continue;
#endif
		seq_buf_printf(s, "%s %lu\n",
			       vm_event_name(memcg_vm_event_stat[i]),
			       memcg_events(memcg, memcg_vm_event_stat[i]));
	}
}

static void memory_stat_format(struct mem_cgroup *memcg, struct seq_buf *s)
{
	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
		memcg_stat_format(memcg, s);
	else
		memcg1_stat_format(memcg, s);
	if (seq_buf_has_overflowed(s))
		pr_warn("%s: Warning, stat buffer overflow, please report\n", __func__);
}

/**
 * mem_cgroup_print_oom_context: Print OOM information relevant to
 * memory controller.
 * @memcg: The memory cgroup that went over limit
 * @p: Task that is going to be killed
 *
 * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
 * enabled
 */
void mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
{
	rcu_read_lock();

	if (memcg) {
		pr_cont(",oom_memcg=");
		pr_cont_cgroup_path(memcg->css.cgroup);
	} else
		pr_cont(",global_oom");
	if (p) {
		pr_cont(",task_memcg=");
		pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
	}
	rcu_read_unlock();
}

/**
 * mem_cgroup_print_oom_meminfo: Print OOM memory information relevant to
 * memory controller.
 * @memcg: The memory cgroup that went over limit
 */
void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
{
	/* Use a static buffer, as the caller is holding oom_lock. */
1785 static char buf[SEQ_BUF_SIZE];
1786 struct seq_buf s;
1787 unsigned long memory_failcnt;
1788
1789 lockdep_assert_held(&oom_lock);
1790
1791 if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
1792 memory_failcnt = atomic_long_read(&memcg->memory_events[MEMCG_MAX]);
1793 else
1794 memory_failcnt = memcg->memory.failcnt;
1795
1796 pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n",
1797 K((u64)page_counter_read(&memcg->memory)),
1798 K((u64)READ_ONCE(memcg->memory.max)), memory_failcnt);
1799 if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
1800 pr_info("swap: usage %llukB, limit %llukB, failcnt %lu\n",
1801 K((u64)page_counter_read(&memcg->swap)),
1802 K((u64)READ_ONCE(memcg->swap.max)),
1803 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_MAX]));
1804 #ifdef CONFIG_MEMCG_V1
1805 else {
1806 pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %lu\n",
1807 K((u64)page_counter_read(&memcg->memsw)),
1808 K((u64)memcg->memsw.max), memcg->memsw.failcnt);
1809 pr_info("kmem: usage %llukB, limit %llukB, failcnt %lu\n",
1810 K((u64)page_counter_read(&memcg->kmem)),
1811 K((u64)memcg->kmem.max), memcg->kmem.failcnt);
1812 }
1813 #endif
1814
1815 pr_info("Memory cgroup stats for ");
1816 pr_cont_cgroup_path(memcg->css.cgroup);
1817 pr_cont(":");
1818 seq_buf_init(&s, buf, SEQ_BUF_SIZE);
1819 memory_stat_format(memcg, &s);
1820 seq_buf_do_printk(&s, KERN_INFO);
1821 }

/*
 * Return the memory (and swap, if configured) limit for a memcg.
 */
unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
{
	unsigned long max = READ_ONCE(memcg->memory.max);

	if (do_memsw_account()) {
		if (mem_cgroup_swappiness(memcg)) {
			/* Calculate swap excess capacity from memsw limit */
			unsigned long swap = READ_ONCE(memcg->memsw.max) - max;

			max += min(swap, (unsigned long)total_swap_pages);
		}
	} else {
		if (mem_cgroup_swappiness(memcg))
			max += min(READ_ONCE(memcg->swap.max),
				   (unsigned long)total_swap_pages);
	}
	return max;
}
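
/*
 * Worked example of the above (values assumed for illustration, not taken
 * from the source): with v1 memsw accounting, memory.max = 1G and
 * memsw.max = 1.5G leave 0.5G of swap excess, so the function returns
 * 1G + min(0.5G, total_swap_pages) worth of pages. Without memsw
 * accounting, memory.max = 1G and swap.max = 2G on a host with only 1G of
 * swap configured yield 1G + min(2G, 1G) = 2G. With swappiness 0, both
 * cases reduce to just memory.max.
 */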

void __memcg_memory_event(struct mem_cgroup *memcg,
			  enum memcg_memory_event event, bool allow_spinning)
{
	bool swap_event = event == MEMCG_SWAP_HIGH || event == MEMCG_SWAP_MAX ||
			  event == MEMCG_SWAP_FAIL;

	/* For now only MEMCG_MAX can happen with !allow_spinning context. */
	VM_WARN_ON_ONCE(!allow_spinning && event != MEMCG_MAX);

	atomic_long_inc(&memcg->memory_events_local[event]);
	if (!swap_event && allow_spinning)
		cgroup_file_notify(&memcg->events_local_file);

	do {
		atomic_long_inc(&memcg->memory_events[event]);
		if (allow_spinning) {
			if (swap_event)
				cgroup_file_notify(&memcg->swap_events_file);
			else
				cgroup_file_notify(&memcg->events_file);
		}

		if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
			break;
		if (cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_LOCAL_EVENTS)
			break;
	} while ((memcg = parent_mem_cgroup(memcg)) &&
		 !mem_cgroup_is_root(memcg));
}
EXPORT_SYMBOL_GPL(__memcg_memory_event);

static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
				     int order)
{
	struct oom_control oc = {
		.zonelist = NULL,
		.nodemask = NULL,
		.memcg = memcg,
		.gfp_mask = gfp_mask,
		.order = order,
	};
	bool ret = true;

	if (mutex_lock_killable(&oom_lock))
		return true;

	if (mem_cgroup_margin(memcg) >= (1 << order))
		goto unlock;

	/*
	 * A few threads which were not waiting at mutex_lock_killable() can
	 * fail to bail out. Therefore, check again after holding oom_lock.
	 */
	ret = out_of_memory(&oc);

unlock:
	mutex_unlock(&oom_lock);
	return ret;
}

/*
 * Returns true if successfully killed one or more processes. Though in some
 * corner cases it can return true even without killing any process.
 */
static bool mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
{
	bool locked, ret;

	if (order > PAGE_ALLOC_COSTLY_ORDER)
		return false;

	memcg_memory_event(memcg, MEMCG_OOM);

	if (!memcg1_oom_prepare(memcg, &locked))
		return false;

	ret = mem_cgroup_out_of_memory(memcg, mask, order);

	memcg1_oom_finish(memcg, locked);

	return ret;
}

/**
 * mem_cgroup_get_oom_group - get a memory cgroup to clean up after OOM
 * @victim: task to be killed by the OOM killer
 * @oom_domain: memcg in case of memcg OOM, NULL in case of system-wide OOM
 *
 * Returns a pointer to a memory cgroup, which has to be cleaned up
 * by killing all belonging OOM-killable tasks.
 *
 * Caller has to call mem_cgroup_put() on the returned non-NULL memcg.
 */
struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim,
					    struct mem_cgroup *oom_domain)
{
	struct mem_cgroup *oom_group = NULL;
	struct mem_cgroup *memcg;

	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
		return NULL;

	if (!oom_domain)
		oom_domain = root_mem_cgroup;

	rcu_read_lock();

	memcg = mem_cgroup_from_task(victim);
	if (mem_cgroup_is_root(memcg))
		goto out;

	/*
	 * If the victim task has been asynchronously moved to a different
	 * memory cgroup, we might end up killing tasks outside oom_domain.
	 * In this case it's better to ignore memory.group.oom.
	 */
	if (unlikely(!mem_cgroup_is_descendant(memcg, oom_domain)))
		goto out;

	/*
	 * Traverse the memory cgroup hierarchy from the victim task's
	 * cgroup up to the OOMing cgroup (or root) to find the
	 * highest-level memory cgroup with oom.group set.
	 */
	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
		if (READ_ONCE(memcg->oom_group))
			oom_group = memcg;

		if (memcg == oom_domain)
			break;
	}

	if (oom_group)
		css_get(&oom_group->css);
out:
	rcu_read_unlock();

	return oom_group;
}

void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
{
	pr_info("Tasks in ");
	pr_cont_cgroup_path(memcg->css.cgroup);
	pr_cont(" are going to be killed due to memory.oom.group set\n");
}

/*
 * The value of NR_MEMCG_STOCK is selected to keep the cached memcgs and their
 * nr_pages in a single cacheline. This may change in the future.
 */
#define NR_MEMCG_STOCK 7
#define FLUSHING_CACHED_CHARGE 0
struct memcg_stock_pcp {
	local_trylock_t lock;
	uint8_t nr_pages[NR_MEMCG_STOCK];
	struct mem_cgroup *cached[NR_MEMCG_STOCK];

	struct work_struct work;
	unsigned long flags;
};

static DEFINE_PER_CPU_ALIGNED(struct memcg_stock_pcp, memcg_stock) = {
	.lock = INIT_LOCAL_TRYLOCK(lock),
};
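
/*
 * Sizing sketch for the above (assuming 64-bit pointers and 64-byte
 * cachelines, neither of which is spelled out here): the seven cached
 * pointers (7 * 8 = 56 bytes) plus the seven nr_pages counters (7 bytes)
 * come to 63 bytes, which is why NR_MEMCG_STOCK is 7 rather than 8.
 */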

struct obj_stock_pcp {
	local_trylock_t lock;
	unsigned int nr_bytes;
	struct obj_cgroup *cached_objcg;
	struct pglist_data *cached_pgdat;
	int nr_slab_reclaimable_b;
	int nr_slab_unreclaimable_b;

	struct work_struct work;
	unsigned long flags;
};

static DEFINE_PER_CPU_ALIGNED(struct obj_stock_pcp, obj_stock) = {
	.lock = INIT_LOCAL_TRYLOCK(lock),
};

static DEFINE_MUTEX(percpu_charge_mutex);

static void drain_obj_stock(struct obj_stock_pcp *stock);
static bool obj_stock_flush_required(struct obj_stock_pcp *stock,
				     struct mem_cgroup *root_memcg);

/**
 * consume_stock: Try to consume stocked charge on this cpu.
 * @memcg: memcg to consume from.
 * @nr_pages: how many pages to charge.
 *
 * Consume the cached charge if enough nr_pages are present, otherwise return
 * failure. Also fail for charge requests larger than MEMCG_CHARGE_BATCH or
 * when the local lock is already taken.
 *
 * Returns true if successful, false otherwise.
 */
static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
{
	struct memcg_stock_pcp *stock;
	uint8_t stock_pages;
	bool ret = false;
	int i;

	if (nr_pages > MEMCG_CHARGE_BATCH ||
	    !local_trylock(&memcg_stock.lock))
		return ret;

	stock = this_cpu_ptr(&memcg_stock);

	for (i = 0; i < NR_MEMCG_STOCK; ++i) {
		if (memcg != READ_ONCE(stock->cached[i]))
			continue;

		stock_pages = READ_ONCE(stock->nr_pages[i]);
		if (stock_pages >= nr_pages) {
			WRITE_ONCE(stock->nr_pages[i], stock_pages - nr_pages);
			ret = true;
		}
		break;
	}

	local_unlock(&memcg_stock.lock);

	return ret;
}

static void memcg_uncharge(struct mem_cgroup *memcg, unsigned int nr_pages)
{
	page_counter_uncharge(&memcg->memory, nr_pages);
	if (do_memsw_account())
		page_counter_uncharge(&memcg->memsw, nr_pages);
}

/*
 * Return stocks cached in percpu to the memcg's page counters and reset the
 * cached information.
 */
static void drain_stock(struct memcg_stock_pcp *stock, int i)
{
	struct mem_cgroup *old = READ_ONCE(stock->cached[i]);
	uint8_t stock_pages;

	if (!old)
		return;

	stock_pages = READ_ONCE(stock->nr_pages[i]);
	if (stock_pages) {
		memcg_uncharge(old, stock_pages);
		WRITE_ONCE(stock->nr_pages[i], 0);
	}

	css_put(&old->css);
	WRITE_ONCE(stock->cached[i], NULL);
}

static void drain_stock_fully(struct memcg_stock_pcp *stock)
{
	int i;

	for (i = 0; i < NR_MEMCG_STOCK; ++i)
		drain_stock(stock, i);
}

static void drain_local_memcg_stock(struct work_struct *dummy)
{
	struct memcg_stock_pcp *stock;

	if (WARN_ONCE(!in_task(), "drain in non-task context"))
		return;

	local_lock(&memcg_stock.lock);

	stock = this_cpu_ptr(&memcg_stock);
	drain_stock_fully(stock);
	clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);

	local_unlock(&memcg_stock.lock);
}

static void drain_local_obj_stock(struct work_struct *dummy)
{
	struct obj_stock_pcp *stock;

	if (WARN_ONCE(!in_task(), "drain in non-task context"))
		return;

	local_lock(&obj_stock.lock);

	stock = this_cpu_ptr(&obj_stock);
	drain_obj_stock(stock);
	clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);

	local_unlock(&obj_stock.lock);
}

static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
{
	struct memcg_stock_pcp *stock;
	struct mem_cgroup *cached;
	uint8_t stock_pages;
	bool success = false;
	int empty_slot = -1;
	int i;

	/*
	 * For now, limit MEMCG_CHARGE_BATCH to 127 or less. If we ever decide
	 * to increase it beyond 127, we will need more careful handling of
	 * nr_pages[] in struct memcg_stock_pcp.
	 */
	BUILD_BUG_ON(MEMCG_CHARGE_BATCH > S8_MAX);

	VM_WARN_ON_ONCE(mem_cgroup_is_root(memcg));

	if (nr_pages > MEMCG_CHARGE_BATCH ||
	    !local_trylock(&memcg_stock.lock)) {
		/*
		 * In case of a larger-than-batch refill or an unlikely failure
		 * to lock the percpu memcg_stock.lock, uncharge the memcg
		 * directly.
		 */
		memcg_uncharge(memcg, nr_pages);
		return;
	}

	stock = this_cpu_ptr(&memcg_stock);
	for (i = 0; i < NR_MEMCG_STOCK; ++i) {
		cached = READ_ONCE(stock->cached[i]);
		if (!cached && empty_slot == -1)
			empty_slot = i;
		if (memcg == READ_ONCE(stock->cached[i])) {
			stock_pages = READ_ONCE(stock->nr_pages[i]) + nr_pages;
			WRITE_ONCE(stock->nr_pages[i], stock_pages);
			if (stock_pages > MEMCG_CHARGE_BATCH)
				drain_stock(stock, i);
			success = true;
			break;
		}
	}

	if (!success) {
		i = empty_slot;
		if (i == -1) {
			i = get_random_u32_below(NR_MEMCG_STOCK);
			drain_stock(stock, i);
		}
		css_get(&memcg->css);
		WRITE_ONCE(stock->cached[i], memcg);
		WRITE_ONCE(stock->nr_pages[i], nr_pages);
	}

	local_unlock(&memcg_stock.lock);
}
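
/*
 * Worked example of the slot handling above (numbers assumed, with
 * MEMCG_CHARGE_BATCH taken to be 64): refilling 32 pages into a slot that
 * already caches this memcg with 40 pages writes 72, and since 72 exceeds
 * the batch, drain_stock() immediately returns the whole 72 pages to the
 * page counters. Refilling for an uncached memcg with all seven slots
 * occupied first evicts one slot chosen by get_random_u32_below().
 */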

static bool is_memcg_drain_needed(struct memcg_stock_pcp *stock,
				  struct mem_cgroup *root_memcg)
{
	struct mem_cgroup *memcg;
	bool flush = false;
	int i;

	rcu_read_lock();
	for (i = 0; i < NR_MEMCG_STOCK; ++i) {
		memcg = READ_ONCE(stock->cached[i]);
		if (!memcg)
			continue;

		if (READ_ONCE(stock->nr_pages[i]) &&
		    mem_cgroup_is_descendant(memcg, root_memcg)) {
			flush = true;
			break;
		}
	}
	rcu_read_unlock();
	return flush;
}

static void schedule_drain_work(int cpu, struct work_struct *work)
{
	/*
	 * Protect the housekeeping cpumask read and the work enqueue together
	 * in the same RCU critical section, so that a later cpuset isolated
	 * partition update only needs to wait for an RCU GP and flush the
	 * pending work on newly isolated CPUs.
	 */
	guard(rcu)();
	if (!cpu_is_isolated(cpu))
		queue_work_on(cpu, memcg_wq, work);
}

/*
 * Drain all per-CPU charge caches for the given root_memcg and the subtree
 * of the hierarchy under it.
 */
void drain_all_stock(struct mem_cgroup *root_memcg)
{
	int cpu, curcpu;

	/* If someone's already draining, avoid running more workers. */
	if (!mutex_trylock(&percpu_charge_mutex))
		return;
	/*
	 * Notify other cpus that a system-wide "drain" is running.
	 * We do not care about races with cpu hotplug, because cpu down
	 * as well as workers from this path always operate on the local
	 * per-cpu data. CPU up doesn't touch memcg_stock at all.
	 */
	migrate_disable();
	curcpu = smp_processor_id();
	for_each_online_cpu(cpu) {
		struct memcg_stock_pcp *memcg_st = &per_cpu(memcg_stock, cpu);
		struct obj_stock_pcp *obj_st = &per_cpu(obj_stock, cpu);

		if (!test_bit(FLUSHING_CACHED_CHARGE, &memcg_st->flags) &&
		    is_memcg_drain_needed(memcg_st, root_memcg) &&
		    !test_and_set_bit(FLUSHING_CACHED_CHARGE,
				      &memcg_st->flags)) {
			if (cpu == curcpu)
				drain_local_memcg_stock(&memcg_st->work);
			else
				schedule_drain_work(cpu, &memcg_st->work);
		}

		if (!test_bit(FLUSHING_CACHED_CHARGE, &obj_st->flags) &&
		    obj_stock_flush_required(obj_st, root_memcg) &&
		    !test_and_set_bit(FLUSHING_CACHED_CHARGE,
				      &obj_st->flags)) {
			if (cpu == curcpu)
				drain_local_obj_stock(&obj_st->work);
			else
				schedule_drain_work(cpu, &obj_st->work);
		}
	}
	migrate_enable();
	mutex_unlock(&percpu_charge_mutex);
}

static int memcg_hotplug_cpu_dead(unsigned int cpu)
{
	/* no need for the local lock */
	drain_obj_stock(&per_cpu(obj_stock, cpu));
	drain_stock_fully(&per_cpu(memcg_stock, cpu));

	return 0;
}

static unsigned long reclaim_high(struct mem_cgroup *memcg,
				  unsigned int nr_pages,
				  gfp_t gfp_mask)
{
	unsigned long nr_reclaimed = 0;

	do {
		unsigned long pflags;

		if (page_counter_read(&memcg->memory) <=
		    READ_ONCE(memcg->memory.high))
			continue;

		memcg_memory_event(memcg, MEMCG_HIGH);

		psi_memstall_enter(&pflags);
		nr_reclaimed += try_to_free_mem_cgroup_pages(memcg, nr_pages,
							     gfp_mask,
							     MEMCG_RECLAIM_MAY_SWAP,
							     NULL);
		psi_memstall_leave(&pflags);
	} while ((memcg = parent_mem_cgroup(memcg)) &&
		 !mem_cgroup_is_root(memcg));

	return nr_reclaimed;
}

static void high_work_func(struct work_struct *work)
{
	struct mem_cgroup *memcg;

	memcg = container_of(work, struct mem_cgroup, high_work);
	reclaim_high(memcg, MEMCG_CHARGE_BATCH, GFP_KERNEL);
}

/*
 * Clamp the maximum sleep time per allocation batch to 2 seconds. This is
 * enough to cause a significant slowdown in most cases, while still
 * allowing diagnostics and tracing to proceed without becoming stuck.
 */
#define MEMCG_MAX_HIGH_DELAY_JIFFIES (2UL*HZ)

/*
 * When calculating the delay, we use these either side of the exponentiation
 * to maintain precision and scale to a reasonable number of jiffies (see the
 * table below).
 *
 * - MEMCG_DELAY_PRECISION_SHIFT: Extra precision bits while translating the
 *   overage ratio to a delay.
 * - MEMCG_DELAY_SCALING_SHIFT: The number of bits to scale down the
 *   proposed penalty in order to reduce to a reasonable number of jiffies,
 *   and to produce a reasonable delay curve.
 *
 * MEMCG_DELAY_SCALING_SHIFT just happens to be a number that produces a
 * reasonable delay curve compared to precision-adjusted overage, not
 * penalising heavily at first, but still making sure that growth beyond the
 * limit penalises misbehaving cgroups by slowing them down exponentially. For
 * example, with a high of 100 megabytes:
 *
 *  +-------+------------------------+
 *  | usage | time to allocate in ms |
 *  +-------+------------------------+
 *  | 100M  |                      0 |
 *  | 101M  |                      6 |
 *  | 102M  |                     25 |
 *  | 103M  |                     57 |
 *  | 104M  |                    102 |
 *  | 105M  |                    159 |
 *  | 106M  |                    230 |
 *  | 107M  |                    313 |
 *  | 108M  |                    409 |
 *  | 109M  |                    518 |
 *  | 110M  |                    639 |
 *  | 111M  |                    774 |
 *  | 112M  |                    921 |
 *  | 113M  |                   1081 |
 *  | 114M  |                   1254 |
 *  | 115M  |                   1439 |
 *  | 116M  |                   1638 |
 *  | 117M  |                   1849 |
 *  | 118M  |                   2000 |
 *  | 119M  |                   2000 |
 *  | 120M  |                   2000 |
 *  +-------+------------------------+
 */
#define MEMCG_DELAY_PRECISION_SHIFT 20
#define MEMCG_DELAY_SCALING_SHIFT 14
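
/*
 * Worked example of the computation in calculate_high_delay() below
 * (assuming HZ == 1000 and a MEMCG_CHARGE_BATCH-sized charge, which is
 * roughly how the table above was produced): at 110M usage against a
 * 100M high,
 *
 *	overage = ((110M - 100M) << 20) / 100M		~= 104857
 *	penalty = (104857 * 104857 * HZ) >> 20 >> 14	~= 640 jiffies
 *
 * i.e. about the 639ms in the table, modulo integer truncation.
 */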

static u64 calculate_overage(unsigned long usage, unsigned long high)
{
	u64 overage;

	if (usage <= high)
		return 0;

	/*
	 * Prevent division by 0 in overage calculation by acting as if
	 * it was a threshold of 1 page
	 */
	high = max(high, 1UL);

	overage = usage - high;
	overage <<= MEMCG_DELAY_PRECISION_SHIFT;
	return div64_u64(overage, high);
}

static u64 mem_find_max_overage(struct mem_cgroup *memcg)
{
	u64 overage, max_overage = 0;

	do {
		overage = calculate_overage(page_counter_read(&memcg->memory),
					    READ_ONCE(memcg->memory.high));
		max_overage = max(overage, max_overage);
	} while ((memcg = parent_mem_cgroup(memcg)) &&
		 !mem_cgroup_is_root(memcg));

	return max_overage;
}

static u64 swap_find_max_overage(struct mem_cgroup *memcg)
{
	u64 overage, max_overage = 0;

	do {
		overage = calculate_overage(page_counter_read(&memcg->swap),
					    READ_ONCE(memcg->swap.high));
		if (overage)
			memcg_memory_event(memcg, MEMCG_SWAP_HIGH);
		max_overage = max(overage, max_overage);
	} while ((memcg = parent_mem_cgroup(memcg)) &&
		 !mem_cgroup_is_root(memcg));

	return max_overage;
}

/*
 * Get the number of jiffies that we should penalise a mischievous cgroup which
 * is exceeding its memory.high by checking both it and its ancestors.
 */
static unsigned long calculate_high_delay(struct mem_cgroup *memcg,
					  unsigned int nr_pages,
					  u64 max_overage)
{
	unsigned long penalty_jiffies;

	if (!max_overage)
		return 0;

	/*
	 * We use overage compared to memory.high to calculate the number of
	 * jiffies to sleep (penalty_jiffies). Ideally this value should be
	 * fairly lenient on small overages, and increasingly harsh when the
	 * memcg in question makes it clear that it has no intention of stopping
	 * its crazy behaviour, so we exponentially increase the delay based on
	 * overage amount.
	 */
	penalty_jiffies = max_overage * max_overage * HZ;
	penalty_jiffies >>= MEMCG_DELAY_PRECISION_SHIFT;
	penalty_jiffies >>= MEMCG_DELAY_SCALING_SHIFT;

	/*
	 * Factor in the task's own contribution to the overage, such that four
	 * N-sized allocations are throttled approximately the same as one
	 * 4N-sized allocation.
	 *
	 * MEMCG_CHARGE_BATCH pages is nominal, so work out how much smaller or
	 * larger the current charge batch is than that.
	 */
	return penalty_jiffies * nr_pages / MEMCG_CHARGE_BATCH;
}

/*
 * Reclaims memory over the high limit. Called directly from
 * try_charge() (context permitting), as well as from the userland
 * return path where reclaim is always able to block.
 */
void __mem_cgroup_handle_over_high(gfp_t gfp_mask)
{
	unsigned long penalty_jiffies;
	unsigned long pflags;
	unsigned long nr_reclaimed;
	unsigned int nr_pages = current->memcg_nr_pages_over_high;
	int nr_retries = MAX_RECLAIM_RETRIES;
	struct mem_cgroup *memcg;
	bool in_retry = false;

	memcg = get_mem_cgroup_from_mm(current->mm);
	current->memcg_nr_pages_over_high = 0;

retry_reclaim:
	/*
	 * Bail if the task is already exiting. Unlike memory.max,
	 * memory.high enforcement isn't as strict, and there is no
	 * OOM killer involved, which means the excess could already
	 * be much bigger (and still growing) than it could for
	 * memory.max; the dying task could get stuck in fruitless
	 * reclaim for a long time, which isn't desirable.
	 */
	if (task_is_dying())
		goto out;

	/*
	 * The allocating task should reclaim at least the batch size, but for
	 * subsequent retries we only want to do what's necessary to prevent oom
	 * or breaching resource isolation.
	 *
	 * This is distinct from memory.max or page allocator behaviour because
	 * memory.high is currently batched, whereas memory.max and the page
	 * allocator run every time an allocation is made.
	 */
	nr_reclaimed = reclaim_high(memcg,
				    in_retry ? SWAP_CLUSTER_MAX : nr_pages,
				    gfp_mask);

	/*
	 * memory.high is breached and reclaim is unable to keep up. Throttle
	 * allocators proactively to slow down excessive growth.
	 */
	penalty_jiffies = calculate_high_delay(memcg, nr_pages,
					       mem_find_max_overage(memcg));

	penalty_jiffies += calculate_high_delay(memcg, nr_pages,
						swap_find_max_overage(memcg));

	/*
	 * Clamp the max delay per usermode return so as to still keep the
	 * application moving forwards and also permit diagnostics, albeit
	 * extremely slowly.
	 */
	penalty_jiffies = min(penalty_jiffies, MEMCG_MAX_HIGH_DELAY_JIFFIES);

	/*
	 * Don't sleep if the amount of jiffies this memcg owes us is so low
	 * that it's not even worth doing, in an attempt to be nice to those who
	 * go only a small amount over their memory.high value and maybe haven't
	 * been aggressively reclaimed enough yet.
	 */
	if (penalty_jiffies <= HZ / 100)
		goto out;

	/*
	 * If reclaim is making forward progress but we're still over
	 * memory.high, we want to encourage that rather than doing allocator
	 * throttling.
	 */
	if (nr_reclaimed || nr_retries--) {
		in_retry = true;
		goto retry_reclaim;
	}

	/*
	 * Reclaim didn't manage to push usage below the limit, slow
	 * this allocating task down.
	 *
	 * If we exit early, we're guaranteed to die (since
	 * schedule_timeout_killable sets TASK_KILLABLE). This means we don't
	 * need to account for any ill-begotten jiffies to pay them off later.
	 */
	psi_memstall_enter(&pflags);
	schedule_timeout_killable(penalty_jiffies);
	psi_memstall_leave(&pflags);

out:
	css_put(&memcg->css);
}

static int try_charge_memcg(struct mem_cgroup *memcg, gfp_t gfp_mask,
			    unsigned int nr_pages)
{
	unsigned int batch = max(MEMCG_CHARGE_BATCH, nr_pages);
	int nr_retries = MAX_RECLAIM_RETRIES;
	struct mem_cgroup *mem_over_limit;
	struct page_counter *counter;
	unsigned long nr_reclaimed;
	bool passed_oom = false;
	unsigned int reclaim_options;
	bool drained = false;
	bool raised_max_event = false;
	unsigned long pflags;
	bool allow_spinning = gfpflags_allow_spinning(gfp_mask);

retry:
	if (consume_stock(memcg, nr_pages))
		return 0;

	if (!allow_spinning)
		/* Avoid the refill and flush of the older stock */
		batch = nr_pages;

	reclaim_options = MEMCG_RECLAIM_MAY_SWAP;
	if (!do_memsw_account() ||
	    page_counter_try_charge(&memcg->memsw, batch, &counter)) {
		if (page_counter_try_charge(&memcg->memory, batch, &counter))
			goto done_restock;
		if (do_memsw_account())
			page_counter_uncharge(&memcg->memsw, batch);
		mem_over_limit = mem_cgroup_from_counter(counter, memory);
	} else {
		mem_over_limit = mem_cgroup_from_counter(counter, memsw);
		reclaim_options &= ~MEMCG_RECLAIM_MAY_SWAP;
	}

	if (batch > nr_pages) {
		batch = nr_pages;
		goto retry;
	}

	/*
	 * Prevent unbounded recursion when reclaim operations need to
	 * allocate memory. This might exceed the limits temporarily,
	 * but we prefer facilitating memory reclaim and getting back
	 * under the limit over triggering OOM kills in these cases.
	 */
	if (unlikely(current->flags & PF_MEMALLOC))
		goto force;

	if (unlikely(task_in_memcg_oom(current)))
		goto nomem;

	if (!gfpflags_allow_blocking(gfp_mask))
		goto nomem;

	__memcg_memory_event(mem_over_limit, MEMCG_MAX, allow_spinning);
	raised_max_event = true;

	psi_memstall_enter(&pflags);
	nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages,
						    gfp_mask, reclaim_options, NULL);
	psi_memstall_leave(&pflags);

	if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
		goto retry;

	if (!drained) {
		drain_all_stock(mem_over_limit);
		drained = true;
		goto retry;
	}

	if (gfp_mask & __GFP_NORETRY)
		goto nomem;
	/*
	 * Even though the limit is exceeded at this point, reclaim
	 * may have been able to free some pages. Retry the charge
	 * before killing the task.
	 *
	 * Only for regular pages, though: huge pages are rather
	 * unlikely to succeed so close to the limit, and we fall back
	 * to regular pages anyway in case of failure.
	 */
	if (nr_reclaimed && nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER))
		goto retry;

	if (nr_retries--)
		goto retry;

	if (gfp_mask & __GFP_RETRY_MAYFAIL)
		goto nomem;

	/* Avoid endless loop for tasks bypassed by the oom killer */
	if (passed_oom && task_is_dying())
		goto nomem;

	/*
	 * Keep retrying as long as the memcg OOM killer is able to make
	 * forward progress, or bypass the charge if the OOM killer
	 * couldn't make any progress.
	 */
	if (mem_cgroup_oom(mem_over_limit, gfp_mask,
			   get_order(nr_pages * PAGE_SIZE))) {
		passed_oom = true;
		nr_retries = MAX_RECLAIM_RETRIES;
		goto retry;
	}
nomem:
	/*
	 * Memcg doesn't have a dedicated reserve for atomic
	 * allocations. But like the global atomic pool, we need to
	 * put the burden of reclaim on regular allocation requests
	 * and let these go through as privileged allocations.
	 */
	if (!(gfp_mask & (__GFP_NOFAIL | __GFP_HIGH)))
		return -ENOMEM;
force:
	/*
	 * If the allocation has to be enforced, don't forget to raise
	 * a MEMCG_MAX event.
	 */
	if (!raised_max_event)
		__memcg_memory_event(mem_over_limit, MEMCG_MAX, allow_spinning);

	/*
	 * The allocation either can't fail or will lead to more memory
	 * being freed very soon. Allow memory usage to go over the limit
	 * temporarily by force charging it.
	 */
	page_counter_charge(&memcg->memory, nr_pages);
	if (do_memsw_account())
		page_counter_charge(&memcg->memsw, nr_pages);

	return 0;

done_restock:
	if (batch > nr_pages)
		refill_stock(memcg, batch - nr_pages);

	/*
	 * If the hierarchy is above the normal consumption range, schedule
	 * reclaim on returning to userland. We can perform reclaim here
	 * if __GFP_RECLAIM, but let's always punt for simplicity and so that
	 * GFP_KERNEL can consistently be used during reclaim. @memcg is
	 * not recorded as it most likely matches current's and won't
	 * change in the meantime. As the high limit is checked again before
	 * reclaim, the cost of a mismatch is negligible.
	 */
	do {
		bool mem_high, swap_high;

		mem_high = page_counter_read(&memcg->memory) >
			READ_ONCE(memcg->memory.high);
		swap_high = page_counter_read(&memcg->swap) >
			READ_ONCE(memcg->swap.high);

		/* Don't bother a random interrupted task */
		if (!in_task()) {
			if (mem_high) {
				schedule_work(&memcg->high_work);
				break;
			}
			continue;
		}

		if (mem_high || swap_high) {
			/*
			 * The allocating tasks in this cgroup will need to do
			 * reclaim or be throttled to prevent further growth
			 * of the memory or swap footprints.
			 *
			 * Target some best-effort fairness between the tasks,
			 * and distribute reclaim work and delay penalties
			 * based on how much each task is actually allocating.
			 */
			current->memcg_nr_pages_over_high += batch;
			set_notify_resume(current);
			break;
		}
	} while ((memcg = parent_mem_cgroup(memcg)));

	/*
	 * Reclaim is set up above to be called from the userland
	 * return path. But also attempt synchronous reclaim to avoid
	 * excessive overrun while the task is still inside the
	 * kernel. If this is successful, the return path will see it
	 * when it rechecks the overage and simply bail out.
	 */
	if (current->memcg_nr_pages_over_high > MEMCG_CHARGE_BATCH &&
	    !(current->flags & PF_MEMALLOC) &&
	    gfpflags_allow_blocking(gfp_mask))
		__mem_cgroup_handle_over_high(gfp_mask);
	return 0;
}

static inline int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
			     unsigned int nr_pages)
{
	if (mem_cgroup_is_root(memcg))
		return 0;

	return try_charge_memcg(memcg, gfp_mask, nr_pages);
}

static void commit_charge(struct folio *folio, struct obj_cgroup *objcg)
{
	VM_BUG_ON_FOLIO(folio_memcg_charged(folio), folio);
	/*
	 * Any of the following ensures folio's objcg stability:
	 *
	 * - the page lock
	 * - LRU isolation
	 * - exclusive reference
	 */
	folio->memcg_data = (unsigned long)objcg;
}

#ifdef CONFIG_MEMCG_NMI_SAFETY_REQUIRES_ATOMIC
static inline void account_slab_nmi_safe(struct mem_cgroup *memcg,
					 struct pglist_data *pgdat,
					 enum node_stat_item idx, int nr)
{
	struct lruvec *lruvec;

	if (likely(!in_nmi())) {
		lruvec = mem_cgroup_lruvec(memcg, pgdat);
		mod_memcg_lruvec_state(lruvec, idx, nr);
	} else {
		struct mem_cgroup_per_node *pn = memcg->nodeinfo[pgdat->node_id];

		/* preemption is disabled in_nmi(). */
		css_rstat_updated(&memcg->css, smp_processor_id());
		if (idx == NR_SLAB_RECLAIMABLE_B)
			atomic_add(nr, &pn->slab_reclaimable);
		else
			atomic_add(nr, &pn->slab_unreclaimable);
	}
}
#else
static inline void account_slab_nmi_safe(struct mem_cgroup *memcg,
					 struct pglist_data *pgdat,
					 enum node_stat_item idx, int nr)
{
	struct lruvec *lruvec;

	lruvec = mem_cgroup_lruvec(memcg, pgdat);
	mod_memcg_lruvec_state(lruvec, idx, nr);
}
#endif

static inline void mod_objcg_mlstate(struct obj_cgroup *objcg,
				     struct pglist_data *pgdat,
				     enum node_stat_item idx, int nr)
{
	struct mem_cgroup *memcg;

	rcu_read_lock();
	memcg = obj_cgroup_memcg(objcg);
	account_slab_nmi_safe(memcg, pgdat, idx, nr);
	rcu_read_unlock();
}

static __always_inline
struct mem_cgroup *mem_cgroup_from_obj_slab(struct slab *slab, void *p)
{
	/*
	 * Slab objects are accounted individually, not per-page.
	 * Memcg membership data for each individual object is saved in
	 * slab->obj_exts.
	 */
	unsigned long obj_exts;
	struct slabobj_ext *obj_ext;
	unsigned int off;

	obj_exts = slab_obj_exts(slab);
	if (!obj_exts)
		return NULL;

	get_slab_obj_exts(obj_exts);
	off = obj_to_index(slab->slab_cache, slab, p);
	obj_ext = slab_obj_ext(slab, obj_exts, off);
	if (obj_ext->objcg) {
		struct obj_cgroup *objcg = obj_ext->objcg;

		put_slab_obj_exts(obj_exts);
		return obj_cgroup_memcg(objcg);
	}
	put_slab_obj_exts(obj_exts);

	return NULL;
}

/*
 * Returns a pointer to the memory cgroup to which the kernel object is charged.
 * It is not suitable for objects allocated using vmalloc().
 *
 * A passed kernel object must be a slab object or a generic kernel page.
 *
 * The caller must ensure the memcg lifetime, e.g. by taking rcu_read_lock(),
 * cgroup_mutex, etc.
 */
struct mem_cgroup *mem_cgroup_from_virt(void *p)
{
	struct slab *slab;

	if (mem_cgroup_disabled())
		return NULL;

	slab = virt_to_slab(p);
	if (slab)
		return mem_cgroup_from_obj_slab(slab, p);
	return folio_memcg_check(virt_to_folio(p));
}

static struct obj_cgroup *__get_obj_cgroup_from_memcg(struct mem_cgroup *memcg)
{
	int nid = numa_node_id();

	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
		struct obj_cgroup *objcg = rcu_dereference(memcg->nodeinfo[nid]->objcg);

		if (likely(objcg && obj_cgroup_tryget(objcg)))
			return objcg;
	}

	return NULL;
}

static inline struct obj_cgroup *get_obj_cgroup_from_memcg(struct mem_cgroup *memcg)
{
	struct obj_cgroup *objcg;

	rcu_read_lock();
	objcg = __get_obj_cgroup_from_memcg(memcg);
	rcu_read_unlock();

	return objcg;
}

static struct obj_cgroup *current_objcg_update(void)
{
	struct mem_cgroup *memcg;
	struct obj_cgroup *old, *objcg = NULL;

	do {
		/* Atomically drop the update bit. */
		old = xchg(&current->objcg, NULL);
		if (old) {
			old = (struct obj_cgroup *)
				((unsigned long)old & ~CURRENT_OBJCG_UPDATE_FLAG);
			obj_cgroup_put(old);

			old = NULL;
		}

		/* If the new objcg is NULL, no reason for the second atomic update. */
		if (!current->mm || (current->flags & PF_KTHREAD))
			return NULL;

		/*
		 * Release the objcg pointer from the previous iteration,
		 * if try_cmpxchg() below fails.
		 */
		if (unlikely(objcg)) {
			obj_cgroup_put(objcg);
			objcg = NULL;
		}

		/*
		 * Obtain the new objcg pointer. The current task can be
		 * asynchronously moved to another memcg and the previous
		 * memcg can be offlined. So let's get the memcg pointer
		 * and try to get a reference to objcg under an rcu read lock.
		 */

		rcu_read_lock();
		memcg = mem_cgroup_from_task(current);
		objcg = __get_obj_cgroup_from_memcg(memcg);
		rcu_read_unlock();

		/*
		 * Try to set up a new objcg pointer atomically. If it
		 * fails, it means the update flag was set concurrently, so
		 * the whole procedure should be repeated.
		 */
	} while (!try_cmpxchg(&current->objcg, &old, objcg));

	return objcg;
}

__always_inline struct obj_cgroup *current_obj_cgroup(void)
{
	struct mem_cgroup *memcg;
	struct obj_cgroup *objcg;
	int nid = numa_node_id();

	if (IS_ENABLED(CONFIG_MEMCG_NMI_UNSAFE) && in_nmi())
		return NULL;

	if (in_task()) {
		memcg = current->active_memcg;
		if (unlikely(memcg))
			goto from_memcg;

		objcg = READ_ONCE(current->objcg);
		if (unlikely((unsigned long)objcg & CURRENT_OBJCG_UPDATE_FLAG))
			objcg = current_objcg_update();
		/*
		 * Objcg reference is kept by the task, so it's safe
		 * to use the objcg by the current task.
		 */
		return objcg ? : rcu_dereference_check(root_mem_cgroup->nodeinfo[nid]->objcg, 1);
	}

	memcg = this_cpu_read(int_active_memcg);
	if (unlikely(memcg))
		goto from_memcg;

	return rcu_dereference_check(root_mem_cgroup->nodeinfo[nid]->objcg, 1);

from_memcg:
	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
		/*
		 * Memcg pointer is protected by scope (see set_active_memcg())
		 * and is pinning the corresponding objcg, so objcg can't go
		 * away and can be used within the scope without any additional
		 * protection.
		 */
		objcg = rcu_dereference_check(memcg->nodeinfo[nid]->objcg, 1);
		if (likely(objcg))
			return objcg;
	}

	return rcu_dereference_check(root_mem_cgroup->nodeinfo[nid]->objcg, 1);
}

struct obj_cgroup *get_obj_cgroup_from_folio(struct folio *folio)
{
	struct obj_cgroup *objcg;

	objcg = folio_objcg(folio);
	if (objcg)
		obj_cgroup_get(objcg);

	return objcg;
}

#ifdef CONFIG_MEMCG_NMI_SAFETY_REQUIRES_ATOMIC
static inline void account_kmem_nmi_safe(struct mem_cgroup *memcg, int val)
{
	if (likely(!in_nmi())) {
		mod_memcg_state(memcg, MEMCG_KMEM, val);
	} else {
		/* preemption is disabled in_nmi(). */
		css_rstat_updated(&memcg->css, smp_processor_id());
		atomic_add(val, &memcg->kmem_stat);
	}
}
#else
static inline void account_kmem_nmi_safe(struct mem_cgroup *memcg, int val)
{
	mod_memcg_state(memcg, MEMCG_KMEM, val);
}
#endif

/*
 * obj_cgroup_uncharge_pages: uncharge a number of kernel pages from an objcg
 * @objcg: object cgroup to uncharge
 * @nr_pages: number of pages to uncharge
 */
static void obj_cgroup_uncharge_pages(struct obj_cgroup *objcg,
				      unsigned int nr_pages)
{
	struct mem_cgroup *memcg;

	memcg = get_mem_cgroup_from_objcg(objcg);

	account_kmem_nmi_safe(memcg, -nr_pages);
	memcg1_account_kmem(memcg, -nr_pages);
	if (!mem_cgroup_is_root(memcg))
		refill_stock(memcg, nr_pages);

	css_put(&memcg->css);
}

/*
 * obj_cgroup_charge_pages: charge a number of kernel pages to an objcg
 * @objcg: object cgroup to charge
 * @gfp: reclaim mode
 * @nr_pages: number of pages to charge
 *
 * Returns 0 on success, an error code on failure.
 */
static int obj_cgroup_charge_pages(struct obj_cgroup *objcg, gfp_t gfp,
				   unsigned int nr_pages)
{
	struct mem_cgroup *memcg;
	int ret;

	memcg = get_mem_cgroup_from_objcg(objcg);

	ret = try_charge_memcg(memcg, gfp, nr_pages);
	if (ret)
		goto out;

	account_kmem_nmi_safe(memcg, nr_pages);
	memcg1_account_kmem(memcg, nr_pages);
out:
	css_put(&memcg->css);

	return ret;
}

static struct obj_cgroup *page_objcg(const struct page *page)
{
	unsigned long memcg_data = page->memcg_data;

	if (mem_cgroup_disabled() || !memcg_data)
		return NULL;

	VM_BUG_ON_PAGE((memcg_data & OBJEXTS_FLAGS_MASK) != MEMCG_DATA_KMEM,
		       page);
	return (struct obj_cgroup *)(memcg_data - MEMCG_DATA_KMEM);
}

static void page_set_objcg(struct page *page, const struct obj_cgroup *objcg)
{
	page->memcg_data = (unsigned long)objcg | MEMCG_DATA_KMEM;
}
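
/*
 * Example of the encoding handled by the two helpers above (a sketch; the
 * actual flag values live in the memcg_data definitions): for a kmem page,
 * page->memcg_data holds the objcg pointer with MEMCG_DATA_KMEM set in its
 * low bits, so after
 *
 *	page_set_objcg(page, objcg);
 *
 * page_objcg(page) subtracts the flag back out and returns objcg, provided
 * no other OBJEXTS flag has been set on the page.
 */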

/**
 * __memcg_kmem_charge_page: charge a kmem page to the current memory cgroup
 * @page: page to charge
 * @gfp: reclaim mode
 * @order: allocation order
 *
 * Returns 0 on success, an error code on failure.
 */
int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order)
{
	struct obj_cgroup *objcg;
	int ret = 0;

	objcg = current_obj_cgroup();
	if (objcg && !obj_cgroup_is_root(objcg)) {
		ret = obj_cgroup_charge_pages(objcg, gfp, 1 << order);
		if (!ret) {
			obj_cgroup_get(objcg);
			page_set_objcg(page, objcg);
			return 0;
		}
	}
	return ret;
}

/**
 * __memcg_kmem_uncharge_page: uncharge a kmem page
 * @page: page to uncharge
 * @order: allocation order
 */
void __memcg_kmem_uncharge_page(struct page *page, int order)
{
	struct obj_cgroup *objcg = page_objcg(page);
	unsigned int nr_pages = 1 << order;

	if (!objcg)
		return;

	obj_cgroup_uncharge_pages(objcg, nr_pages);
	page->memcg_data = 0;
	obj_cgroup_put(objcg);
}

static struct obj_stock_pcp *trylock_stock(void)
{
	if (local_trylock(&obj_stock.lock))
		return this_cpu_ptr(&obj_stock);

	return NULL;
}

static void unlock_stock(struct obj_stock_pcp *stock)
{
	if (stock)
		local_unlock(&obj_stock.lock);
}

/* Call after __refill_obj_stock() to ensure stock->cached_objcg == objcg */
static void __account_obj_stock(struct obj_cgroup *objcg,
				struct obj_stock_pcp *stock, int nr,
				struct pglist_data *pgdat, enum node_stat_item idx)
{
	int *bytes;

	if (!stock || READ_ONCE(stock->cached_objcg) != objcg)
		goto direct;

	/*
	 * Save vmstat data in stock and skip vmstat array update unless
	 * accumulating over a page of vmstat data or when pgdat changes.
	 */
	if (stock->cached_pgdat != pgdat) {
		/* Flush the existing cached vmstat data */
		struct pglist_data *oldpg = stock->cached_pgdat;

		if (stock->nr_slab_reclaimable_b) {
			mod_objcg_mlstate(objcg, oldpg, NR_SLAB_RECLAIMABLE_B,
					  stock->nr_slab_reclaimable_b);
			stock->nr_slab_reclaimable_b = 0;
		}
		if (stock->nr_slab_unreclaimable_b) {
			mod_objcg_mlstate(objcg, oldpg, NR_SLAB_UNRECLAIMABLE_B,
					  stock->nr_slab_unreclaimable_b);
			stock->nr_slab_unreclaimable_b = 0;
		}
		stock->cached_pgdat = pgdat;
	}

	bytes = (idx == NR_SLAB_RECLAIMABLE_B) ? &stock->nr_slab_reclaimable_b
					       : &stock->nr_slab_unreclaimable_b;
	/*
	 * Even for a large object >= PAGE_SIZE, the vmstat data will still be
	 * cached locally at least once before pushing it out.
	 */
	if (!*bytes) {
		*bytes = nr;
		nr = 0;
	} else {
		*bytes += nr;
		if (abs(*bytes) > PAGE_SIZE) {
			nr = *bytes;
			*bytes = 0;
		} else {
			nr = 0;
		}
	}
direct:
	if (nr)
		mod_objcg_mlstate(objcg, pgdat, idx, nr);
}

static bool __consume_obj_stock(struct obj_cgroup *objcg,
				struct obj_stock_pcp *stock,
				unsigned int nr_bytes)
{
	if (objcg == READ_ONCE(stock->cached_objcg) &&
	    stock->nr_bytes >= nr_bytes) {
		stock->nr_bytes -= nr_bytes;
		return true;
	}

	return false;
}

static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes)
{
	struct obj_stock_pcp *stock;
	bool ret = false;

	stock = trylock_stock();
	if (!stock)
		return ret;

	ret = __consume_obj_stock(objcg, stock, nr_bytes);
	unlock_stock(stock);

	return ret;
}

static void drain_obj_stock(struct obj_stock_pcp *stock)
{
	struct obj_cgroup *old = READ_ONCE(stock->cached_objcg);

	if (!old)
		return;

	if (stock->nr_bytes) {
		unsigned int nr_pages = stock->nr_bytes >> PAGE_SHIFT;
		unsigned int nr_bytes = stock->nr_bytes & (PAGE_SIZE - 1);

		if (nr_pages) {
			struct mem_cgroup *memcg;

			memcg = get_mem_cgroup_from_objcg(old);

			mod_memcg_state(memcg, MEMCG_KMEM, -nr_pages);
			memcg1_account_kmem(memcg, -nr_pages);
			if (!mem_cgroup_is_root(memcg))
				memcg_uncharge(memcg, nr_pages);

			css_put(&memcg->css);
		}

		/*
		 * The leftover is flushed to the centralized per-memcg value.
		 * On the next attempt to refill obj stock it will be moved
		 * to a per-cpu stock (probably, on another CPU), see
		 * refill_obj_stock().
		 *
		 * How often it's flushed is a trade-off between the memory
		 * limit enforcement accuracy and potential CPU contention,
		 * so it might be changed in the future.
		 */
		atomic_add(nr_bytes, &old->nr_charged_bytes);
		stock->nr_bytes = 0;
	}

	/*
	 * Flush the vmstat data in current stock
	 */
	if (stock->nr_slab_reclaimable_b || stock->nr_slab_unreclaimable_b) {
		if (stock->nr_slab_reclaimable_b) {
			mod_objcg_mlstate(old, stock->cached_pgdat,
					  NR_SLAB_RECLAIMABLE_B,
					  stock->nr_slab_reclaimable_b);
			stock->nr_slab_reclaimable_b = 0;
		}
		if (stock->nr_slab_unreclaimable_b) {
			mod_objcg_mlstate(old, stock->cached_pgdat,
					  NR_SLAB_UNRECLAIMABLE_B,
					  stock->nr_slab_unreclaimable_b);
			stock->nr_slab_unreclaimable_b = 0;
		}
		stock->cached_pgdat = NULL;
	}

	WRITE_ONCE(stock->cached_objcg, NULL);
	obj_cgroup_put(old);
}

static bool obj_stock_flush_required(struct obj_stock_pcp *stock,
				     struct mem_cgroup *root_memcg)
{
	struct obj_cgroup *objcg = READ_ONCE(stock->cached_objcg);
	struct mem_cgroup *memcg;
	bool flush = false;

	rcu_read_lock();
	if (objcg) {
		memcg = obj_cgroup_memcg(objcg);
		if (memcg && mem_cgroup_is_descendant(memcg, root_memcg))
			flush = true;
	}
	rcu_read_unlock();

	return flush;
}

static void __refill_obj_stock(struct obj_cgroup *objcg,
			       struct obj_stock_pcp *stock,
			       unsigned int nr_bytes,
			       bool allow_uncharge)
{
	unsigned int nr_pages = 0;

	if (!stock) {
		nr_pages = nr_bytes >> PAGE_SHIFT;
		nr_bytes = nr_bytes & (PAGE_SIZE - 1);
		atomic_add(nr_bytes, &objcg->nr_charged_bytes);
		goto out;
	}

	if (READ_ONCE(stock->cached_objcg) != objcg) { /* reset if necessary */
		drain_obj_stock(stock);
		obj_cgroup_get(objcg);
		stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes)
			? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0;
		WRITE_ONCE(stock->cached_objcg, objcg);

		allow_uncharge = true;	/* Allow uncharge when objcg changes */
	}
	stock->nr_bytes += nr_bytes;

	if (allow_uncharge && (stock->nr_bytes > PAGE_SIZE)) {
		nr_pages = stock->nr_bytes >> PAGE_SHIFT;
		stock->nr_bytes &= (PAGE_SIZE - 1);
	}

out:
	if (nr_pages)
		obj_cgroup_uncharge_pages(objcg, nr_pages);
}

static void refill_obj_stock(struct obj_cgroup *objcg,
			     unsigned int nr_bytes,
			     bool allow_uncharge)
{
	struct obj_stock_pcp *stock = trylock_stock();

	__refill_obj_stock(objcg, stock, nr_bytes, allow_uncharge);
	unlock_stock(stock);
}
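
/*
 * Worked example of the byte flow above (assuming PAGE_SIZE == 4096):
 * refilling 200 bytes into a stock already caching this objcg with 4000
 * bytes brings nr_bytes to 4200; with allow_uncharge true, one full page
 * is given back via obj_cgroup_uncharge_pages() and 104 bytes stay cached.
 * If the trylock fails, the 200 bytes go straight into
 * objcg->nr_charged_bytes instead.
 */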

static int __obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp,
			       size_t size, size_t *remainder)
{
	size_t charge_size;
	int ret;

	charge_size = PAGE_ALIGN(size);
	ret = obj_cgroup_charge_pages(objcg, gfp, charge_size >> PAGE_SHIFT);
	if (!ret)
		*remainder = charge_size - size;

	return ret;
}

int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size)
{
	size_t remainder;
	int ret;

	if (likely(consume_obj_stock(objcg, size)))
		return 0;

	/*
	 * In theory, objcg->nr_charged_bytes can have enough
	 * pre-charged bytes to satisfy the allocation. However,
	 * flushing objcg->nr_charged_bytes requires two atomic
	 * operations, and objcg->nr_charged_bytes can't be big.
	 * The shared objcg->nr_charged_bytes can also become a
	 * performance bottleneck if all tasks of the same memcg are
	 * trying to update it. So it's better to ignore it and try
	 * to grab some new pages. The stock's nr_bytes will be flushed to
	 * objcg->nr_charged_bytes later on when objcg changes.
	 *
	 * The stock's nr_bytes may contain enough pre-charged bytes
	 * to allow one less page to be charged, but we can't rely
	 * on the pre-charged bytes not being changed outside of
	 * consume_obj_stock() or refill_obj_stock(). So ignore those
	 * pre-charged bytes as well when charging pages. To avoid a
	 * page uncharge right after a page charge, we set the
	 * allow_uncharge flag to false when calling refill_obj_stock()
	 * to temporarily allow the pre-charged bytes to exceed the page
	 * size limit. The maximum reachable value of the pre-charged
	 * bytes is (sizeof(object) + PAGE_SIZE - 2) if there is no data
	 * race.
	 */
	ret = __obj_cgroup_charge(objcg, gfp, size, &remainder);
	if (!ret && remainder)
		refill_obj_stock(objcg, remainder, false);

	return ret;
}

void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size)
{
	refill_obj_stock(objcg, size, true);
}

static inline size_t obj_full_size(struct kmem_cache *s)
{
	/*
	 * For each accounted object there is an extra space which is used
	 * to store obj_cgroup membership. Charge it too.
	 */
	return s->size + sizeof(struct obj_cgroup *);
}
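
/*
 * For example (cache size assumed): on a 64-bit kernel, a kmem_cache with
 * s->size == 64 is charged 64 + 8 = 72 bytes per accounted object, the
 * extra pointer-sized slot paying for the obj_cgroup membership record.
 */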

bool __memcg_slab_post_alloc_hook(struct kmem_cache *s, struct list_lru *lru,
				  gfp_t flags, size_t size, void **p)
{
	size_t obj_size = obj_full_size(s);
	struct obj_cgroup *objcg;
	struct slab *slab;
	unsigned long off;
	size_t i;

	/*
	 * The obtained objcg pointer is safe to use within the current scope,
	 * defined by current task or set_active_memcg() pair.
	 * obj_cgroup_get() is used to get a permanent reference.
	 */
	objcg = current_obj_cgroup();
	if (!objcg || obj_cgroup_is_root(objcg))
		return true;

	/*
	 * slab_alloc_node() avoids the NULL check, so we might be called with a
	 * single NULL object. kmem_cache_alloc_bulk() aborts if it can't fill
	 * the whole requested size.
	 * Return success, as there's nothing to free back.
	 */
	if (unlikely(*p == NULL))
		return true;

	flags &= gfp_allowed_mask;

	if (lru) {
		int ret;
		struct mem_cgroup *memcg;

		memcg = get_mem_cgroup_from_objcg(objcg);
		ret = memcg_list_lru_alloc(memcg, lru, flags);
		css_put(&memcg->css);

		if (ret)
			return false;
	}

	for (i = 0; i < size; i++) {
		unsigned long obj_exts;
		struct slabobj_ext *obj_ext;
		struct obj_stock_pcp *stock;

		slab = virt_to_slab(p[i]);

		if (!slab_obj_exts(slab) &&
		    alloc_slab_obj_exts(slab, s, flags, false)) {
			continue;
		}

		/*
		 * If we fail and size is 1, memcg_alloc_abort_single() will
		 * just free the object, which is ok as we have not assigned
		 * objcg to its obj_ext yet.
		 *
		 * For larger sizes, kmem_cache_free_bulk() will uncharge
		 * any objects that were already charged and obj_ext assigned.
		 *
		 * TODO: we could batch this until slab_pgdat(slab) changes
		 * between iterations, with a more complicated undo.
		 */
		stock = trylock_stock();
		if (!stock || !__consume_obj_stock(objcg, stock, obj_size)) {
			size_t remainder;

			unlock_stock(stock);
			if (__obj_cgroup_charge(objcg, flags, obj_size, &remainder))
				return false;
			stock = trylock_stock();
			if (remainder)
				__refill_obj_stock(objcg, stock, remainder, false);
		}
		__account_obj_stock(objcg, stock, obj_size,
				    slab_pgdat(slab), cache_vmstat_idx(s));
		unlock_stock(stock);

		obj_exts = slab_obj_exts(slab);
		get_slab_obj_exts(obj_exts);
		off = obj_to_index(s, slab, p[i]);
		obj_ext = slab_obj_ext(slab, obj_exts, off);
		obj_cgroup_get(objcg);
		obj_ext->objcg = objcg;
		put_slab_obj_exts(obj_exts);
	}

	return true;
}
3507
__memcg_slab_free_hook(struct kmem_cache * s,struct slab * slab,void ** p,int objects,unsigned long obj_exts)3508 void __memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
3509 void **p, int objects, unsigned long obj_exts)
3510 {
3511 size_t obj_size = obj_full_size(s);
3512
3513 for (int i = 0; i < objects; i++) {
3514 struct obj_cgroup *objcg;
3515 struct slabobj_ext *obj_ext;
3516 struct obj_stock_pcp *stock;
3517 unsigned int off;
3518
3519 off = obj_to_index(s, slab, p[i]);
3520 obj_ext = slab_obj_ext(slab, obj_exts, off);
3521 objcg = obj_ext->objcg;
3522 if (!objcg)
3523 continue;
3524
3525 obj_ext->objcg = NULL;
3526
3527 stock = trylock_stock();
3528 __refill_obj_stock(objcg, stock, obj_size, true);
3529 __account_obj_stock(objcg, stock, -obj_size,
3530 slab_pgdat(slab), cache_vmstat_idx(s));
3531 unlock_stock(stock);
3532
3533 obj_cgroup_put(objcg);
3534 }
3535 }
3536
3537 /*
3538 * The objcg is only set on the first page, so transfer it to all the
3539 * other pages.
3540 */
split_page_memcg(struct page * page,unsigned order)3541 void split_page_memcg(struct page *page, unsigned order)
3542 {
3543 struct obj_cgroup *objcg = page_objcg(page);
3544 unsigned int i, nr = 1 << order;
3545
3546 if (!objcg)
3547 return;
3548
3549 for (i = 1; i < nr; i++)
3550 page_set_objcg(&page[i], objcg);
3551
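	/*
	 * The head page keeps the objcg reference it already holds; take
	 * one additional reference for each of the nr - 1 tail pages that
	 * now point at the objcg.
	 */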
3552 obj_cgroup_get_many(objcg, nr - 1);
3553 }
3554
3555 void folio_split_memcg_refs(struct folio *folio, unsigned old_order,
3556 unsigned new_order)
3557 {
3558 unsigned new_refs;
3559
3560 if (mem_cgroup_disabled() || !folio_memcg_charged(folio))
3561 return;
3562
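	/*
	 * Splitting a folio of old_order into folios of new_order yields
	 * 2^(old_order - new_order) pieces. The original folio keeps its
	 * existing objcg reference, so only the remaining pieces need one.
	 */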
3563 new_refs = (1 << (old_order - new_order)) - 1;
3564 obj_cgroup_get_many(folio_objcg(folio), new_refs);
3565 }
3566
3567 static void memcg_online_kmem(struct mem_cgroup *memcg)
3568 {
3569 if (mem_cgroup_kmem_disabled())
3570 return;
3571
3572 if (unlikely(mem_cgroup_is_root(memcg)))
3573 return;
3574
3575 static_branch_enable(&memcg_kmem_online_key);
3576
3577 memcg->kmemcg_id = memcg->id.id;
3578 }
3579
3580 static void memcg_offline_kmem(struct mem_cgroup *memcg)
3581 {
3582 struct mem_cgroup *parent;
3583
3584 if (mem_cgroup_kmem_disabled())
3585 return;
3586
3587 if (unlikely(mem_cgroup_is_root(memcg)))
3588 return;
3589
3590 parent = parent_mem_cgroup(memcg);
3591 memcg_reparent_list_lrus(memcg, parent);
3592 }
3593
3594 #ifdef CONFIG_CGROUP_WRITEBACK
3595
3596 #include <trace/events/writeback.h>
3597
3598 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
3599 {
3600 return wb_domain_init(&memcg->cgwb_domain, gfp);
3601 }
3602
3603 static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
3604 {
3605 wb_domain_exit(&memcg->cgwb_domain);
3606 }
3607
3608 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
3609 {
3610 wb_domain_size_changed(&memcg->cgwb_domain);
3611 }
3612
3613 struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
3614 {
3615 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
3616
3617 if (!memcg->css.parent)
3618 return NULL;
3619
3620 return &memcg->cgwb_domain;
3621 }
3622
3623 /**
3624 * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg
3625 * @wb: bdi_writeback in question
3626 * @pfilepages: out parameter for number of file pages
3627 * @pheadroom: out parameter for number of allocatable pages according to memcg
3628 * @pdirty: out parameter for number of dirty pages
3629 * @pwriteback: out parameter for number of pages under writeback
3630 *
3631 * Determine the numbers of file, headroom, dirty, and writeback pages in
3632 * @wb's memcg. File, dirty and writeback are self-explanatory. Headroom
3633 * is a bit more involved.
3634 *
3635 * A memcg's headroom is "min(max, high) - used". In the hierarchy, the
3636 * headroom is calculated as the lowest headroom of itself and the
3637 * ancestors. Note that this doesn't consider the actual amount of
3638 * available memory in the system. The caller should further cap
3639 * *@pheadroom accordingly.
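 *
 * For example (hypothetical numbers): with max = 1G, high = 512M and
 * 384M of usage, this level's ceiling is min(1G, 512M) = 512M and it
 * contributes 512M - 384M = 128M of headroom; an ancestor with only
 * 64M of headroom would cap the final result at 64M.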
3640 */
3641 void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
3642 unsigned long *pheadroom, unsigned long *pdirty,
3643 unsigned long *pwriteback)
3644 {
3645 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
3646 struct mem_cgroup *parent;
3647
3648 mem_cgroup_flush_stats_ratelimited(memcg);
3649
3650 *pdirty = memcg_page_state(memcg, NR_FILE_DIRTY);
3651 *pwriteback = memcg_page_state(memcg, NR_WRITEBACK);
3652 *pfilepages = memcg_page_state(memcg, NR_INACTIVE_FILE) +
3653 memcg_page_state(memcg, NR_ACTIVE_FILE);
3654
3655 *pheadroom = PAGE_COUNTER_MAX;
3656 while ((parent = parent_mem_cgroup(memcg))) {
3657 unsigned long ceiling = min(READ_ONCE(memcg->memory.max),
3658 READ_ONCE(memcg->memory.high));
3659 unsigned long used = page_counter_read(&memcg->memory);
3660
3661 *pheadroom = min(*pheadroom, ceiling - min(ceiling, used));
3662 memcg = parent;
3663 }
3664 }
3665
3666 /*
3667 * Foreign dirty flushing
3668 *
3669 * There's an inherent mismatch between memcg and writeback. The former
3670 * tracks ownership per-page while the latter per-inode. This was a
3671 * deliberate design decision because honoring per-page ownership in the
3672 * writeback path is complicated, may lead to higher CPU and IO overheads
3673 * and deemed unnecessary given that write-sharing an inode across
3674 * different cgroups isn't a common use-case.
3675 *
3676 * Combined with inode majority-writer ownership switching, this works well
3677 * enough in most cases but there are some pathological cases. For
3678 * example, let's say there are two cgroups A and B which keep writing to
3679 * different but confined parts of the same inode. B owns the inode and
3680 * A's memory is limited far below B's. A's dirty ratio can rise enough to
3681 * trigger balance_dirty_pages() sleeps but B's can be low enough to avoid
3682 * triggering background writeback. A will be slowed down without a way to
3683 * make writeback of the dirty pages happen.
3684 *
3685 * Conditions like the above can lead to a cgroup getting repeatedly and
3686 * severely throttled after making some progress after each
3687 * dirty_expire_interval while the underlying IO device is almost
3688 * completely idle.
3689 *
3690 * Solving this problem completely requires matching the ownership tracking
3691 * granularities between memcg and writeback in either direction. However,
3692 * the more egregious behaviors can be avoided by simply remembering the
3693 * most recent foreign dirtying events and initiating remote flushes on
3694 * them when local writeback isn't enough to keep the memory clean enough.
3695 *
3696 * The following two functions implement such a mechanism. When a foreign
3697 * page - a page whose memcg and writeback ownerships don't match - is
3698 * dirtied, mem_cgroup_track_foreign_dirty() records the inode owning
3699 * bdi_writeback on the page owning memcg. When balance_dirty_pages()
3700 * decides that the memcg needs to sleep due to high dirty ratio, it calls
3701 * mem_cgroup_flush_foreign() which queues writeback on the recorded
3702 * foreign bdi_writebacks which haven't expired. Both the numbers of
3703 * recorded bdi_writebacks and concurrent in-flight foreign writebacks are
3704 * limited to MEMCG_CGWB_FRN_CNT.
3705 *
3706 * The mechanism only remembers IDs and doesn't hold any object references.
3707 * As being wrong occasionally doesn't matter, updates and accesses to the
3708 * records are lockless and racy.
3709 */
3710 void mem_cgroup_track_foreign_dirty_slowpath(struct folio *folio,
3711 struct bdi_writeback *wb)
3712 {
3713 struct mem_cgroup *memcg = folio_memcg(folio);
3714 struct memcg_cgwb_frn *frn;
3715 u64 now = get_jiffies_64();
3716 u64 oldest_at = now;
3717 int oldest = -1;
3718 int i;
3719
3720 trace_track_foreign_dirty(folio, wb);
3721
3722 /*
3723 * Pick the slot to use. If there is already a slot for @wb, keep
3724 * using it. If not, replace the oldest one that isn't being
3725 * written out.
3726 */
3727 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) {
3728 frn = &memcg->cgwb_frn[i];
3729 if (frn->bdi_id == wb->bdi->id &&
3730 frn->memcg_id == wb->memcg_css->id)
3731 break;
3732 if (time_before64(frn->at, oldest_at) &&
3733 atomic_read(&frn->done.cnt) == 1) {
3734 oldest = i;
3735 oldest_at = frn->at;
3736 }
3737 }
3738
3739 if (i < MEMCG_CGWB_FRN_CNT) {
3740 /*
3741 * Re-using an existing one. Update timestamp lazily to
3742 * avoid making the cacheline hot. We want them to be
3743 * reasonably up-to-date and significantly shorter than
3744 * dirty_expire_interval as that's what expires the record.
3745 * Use the shorter of 1s and dirty_expire_interval / 8.
3746 */
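		/* dirty_expire_interval is in centiseconds; * 10 converts it to msecs */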
3747 unsigned long update_intv =
3748 min_t(unsigned long, HZ,
3749 msecs_to_jiffies(dirty_expire_interval * 10) / 8);
3750
3751 if (time_before64(frn->at, now - update_intv))
3752 frn->at = now;
3753 } else if (oldest >= 0) {
3754 /* replace the oldest free one */
3755 frn = &memcg->cgwb_frn[oldest];
3756 frn->bdi_id = wb->bdi->id;
3757 frn->memcg_id = wb->memcg_css->id;
3758 frn->at = now;
3759 }
3760 }
3761
3762 /* issue foreign writeback flushes for recorded foreign dirtying events */
3763 void mem_cgroup_flush_foreign(struct bdi_writeback *wb)
3764 {
3765 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
3766 unsigned long intv = msecs_to_jiffies(dirty_expire_interval * 10);
3767 u64 now = jiffies_64;
3768 int i;
3769
3770 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) {
3771 struct memcg_cgwb_frn *frn = &memcg->cgwb_frn[i];
3772
3773 /*
3774 * If the record is older than dirty_expire_interval,
3775 * writeback on it has already started. No need to kick it
3776 * off again. Also, don't start a new one if there's
3777 * already one in flight.
3778 */
3779 if (time_after64(frn->at, now - intv) &&
3780 atomic_read(&frn->done.cnt) == 1) {
3781 frn->at = 0;
3782 trace_flush_foreign(wb, frn->bdi_id, frn->memcg_id);
3783 cgroup_writeback_by_id(frn->bdi_id, frn->memcg_id,
3784 WB_REASON_FOREIGN_FLUSH,
3785 &frn->done);
3786 }
3787 }
3788 }
3789
3790 #else /* CONFIG_CGROUP_WRITEBACK */
3791
3792 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
3793 {
3794 return 0;
3795 }
3796
3797 static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
3798 {
3799 }
3800
3801 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
3802 {
3803 }
3804
3805 #endif /* CONFIG_CGROUP_WRITEBACK */
3806
3807 /*
3808 * Private memory cgroup IDR
3809 *
3810 * Swap-out records and page cache shadow entries need to store memcg
3811 * references in constrained space, so we maintain an ID space that is
3812 * limited to 16 bit (MEM_CGROUP_ID_MAX), limiting the total number of
3813 * memory-controlled cgroups to 64k.
3814 *
3815 * However, there usually are many references to the offline CSS after
3816 * the cgroup has been destroyed, such as page cache or reclaimable
3817 * slab objects, that don't need to hang on to the ID. We want to keep
3818 * those dead CSS from occupying IDs, or we might quickly exhaust the
3819 * relatively small ID space and prevent the creation of new cgroups
3820 * even when there are far fewer than 64k cgroups - possibly none.
3821 *
3822 * Maintain a private 16-bit ID space for memcg, and allow the ID to
3823 * be freed and recycled when it's no longer needed, which is usually
3824 * when the CSS is offlined.
3825 *
3826 * The only exceptions to that are records of swapped-out tmpfs/shmem
3827 * pages that need to be attributed to live ancestors on swapin. But
3828 * those references are manageable from userspace.
3829 */
3830
3831 #define MEM_CGROUP_ID_MAX ((1UL << MEM_CGROUP_ID_SHIFT) - 1)
3832 static DEFINE_XARRAY_ALLOC1(mem_cgroup_private_ids);
3833
3834 static void mem_cgroup_private_id_remove(struct mem_cgroup *memcg)
3835 {
3836 if (memcg->id.id > 0) {
3837 xa_erase(&mem_cgroup_private_ids, memcg->id.id);
3838 memcg->id.id = 0;
3839 }
3840 }
3841
3842 static inline void mem_cgroup_private_id_put(struct mem_cgroup *memcg, unsigned int n)
3843 {
3844 if (refcount_sub_and_test(n, &memcg->id.ref)) {
3845 mem_cgroup_private_id_remove(memcg);
3846
3847 /* Memcg ID pins CSS */
3848 css_put(&memcg->css);
3849 }
3850 }
3851
3852 struct mem_cgroup *mem_cgroup_private_id_get_online(struct mem_cgroup *memcg, unsigned int n)
3853 {
3854 while (!refcount_add_not_zero(n, &memcg->id.ref)) {
3855 /*
3856 * The root cgroup cannot be destroyed, so its refcount must
3857 * always be >= 1.
3858 */
3859 if (WARN_ON_ONCE(mem_cgroup_is_root(memcg))) {
3860 VM_BUG_ON(1);
3861 break;
3862 }
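		/*
		 * This memcg's ID is already dead: move the references up
		 * to the nearest ancestor that is still online and can
		 * accept them.
		 */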
3863 memcg = parent_mem_cgroup(memcg);
3864 }
3865 return memcg;
3866 }
3867
3868 /**
3869 * mem_cgroup_from_private_id - look up a memcg from a memcg id
3870 * @id: the memcg id to look up
3871 *
3872 * Caller must hold rcu_read_lock().
3873 */
3874 struct mem_cgroup *mem_cgroup_from_private_id(unsigned short id)
3875 {
3876 WARN_ON_ONCE(!rcu_read_lock_held());
3877 return xa_load(&mem_cgroup_private_ids, id);
3878 }
3879
3880 struct mem_cgroup *mem_cgroup_get_from_id(u64 id)
3881 {
3882 struct cgroup *cgrp;
3883 struct cgroup_subsys_state *css;
3884 struct mem_cgroup *memcg = NULL;
3885
3886 cgrp = cgroup_get_from_id(id);
3887 if (IS_ERR(cgrp))
3888 return NULL;
3889
3890 css = cgroup_get_e_css(cgrp, &memory_cgrp_subsys);
3891 if (css)
3892 memcg = container_of(css, struct mem_cgroup, css);
3893
3894 cgroup_put(cgrp);
3895
3896 return memcg;
3897 }
3898
3899 static void free_mem_cgroup_per_node_info(struct mem_cgroup_per_node *pn)
3900 {
3901 if (!pn)
3902 return;
3903
3904 free_percpu(pn->lruvec_stats_percpu);
3905 kfree(pn->lruvec_stats);
3906 kfree(pn);
3907 }
3908
3909 static bool alloc_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
3910 {
3911 struct mem_cgroup_per_node *pn;
3912
3913 pn = kmem_cache_alloc_node(memcg_pn_cachep, GFP_KERNEL | __GFP_ZERO,
3914 node);
3915 if (!pn)
3916 return false;
3917
3918 pn->lruvec_stats = kzalloc_node(sizeof(struct lruvec_stats),
3919 GFP_KERNEL_ACCOUNT, node);
3920 if (!pn->lruvec_stats)
3921 goto fail;
3922
3923 pn->lruvec_stats_percpu = alloc_percpu_gfp(struct lruvec_stats_percpu,
3924 GFP_KERNEL_ACCOUNT);
3925 if (!pn->lruvec_stats_percpu)
3926 goto fail;
3927
3928 INIT_LIST_HEAD(&pn->objcg_list);
3929
3930 lruvec_init(&pn->lruvec);
3931 pn->memcg = memcg;
3932
3933 memcg->nodeinfo[node] = pn;
3934 return true;
3935 fail:
3936 free_mem_cgroup_per_node_info(pn);
3937 return false;
3938 }
3939
3940 static void __mem_cgroup_free(struct mem_cgroup *memcg)
3941 {
3942 int node;
3943
3944 for_each_node(node) {
3945 struct mem_cgroup_per_node *pn = memcg->nodeinfo[node];
3946 if (!pn)
3947 continue;
3948
3949 obj_cgroup_put(pn->orig_objcg);
3950 free_mem_cgroup_per_node_info(pn);
3951 }
3952 memcg1_free_events(memcg);
3953 kfree(memcg->vmstats);
3954 free_percpu(memcg->vmstats_percpu);
3955 kfree(memcg);
3956 }
3957
3958 static void mem_cgroup_free(struct mem_cgroup *memcg)
3959 {
3960 lru_gen_exit_memcg(memcg);
3961 memcg_wb_domain_exit(memcg);
3962 __mem_cgroup_free(memcg);
3963 }
3964
3965 static struct mem_cgroup *mem_cgroup_alloc(struct mem_cgroup *parent)
3966 {
3967 struct memcg_vmstats_percpu *statc;
3968 struct memcg_vmstats_percpu __percpu *pstatc_pcpu;
3969 struct mem_cgroup *memcg;
3970 int node, cpu;
3971 int __maybe_unused i;
3972 long error;
3973
3974 memcg = kmem_cache_zalloc(memcg_cachep, GFP_KERNEL);
3975 if (!memcg)
3976 return ERR_PTR(-ENOMEM);
3977
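	/*
	 * Allocate the private ID starting from 1: ID 0 is reserved to
	 * mean "no ID", which mem_cgroup_private_id_remove() relies on.
	 */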
3978 error = xa_alloc(&mem_cgroup_private_ids, &memcg->id.id, NULL,
3979 XA_LIMIT(1, MEM_CGROUP_ID_MAX), GFP_KERNEL);
3980 if (error)
3981 goto fail;
3982 error = -ENOMEM;
3983
3984 memcg->vmstats = kzalloc_obj(struct memcg_vmstats, GFP_KERNEL_ACCOUNT);
3985 if (!memcg->vmstats)
3986 goto fail;
3987
3988 memcg->vmstats_percpu = alloc_percpu_gfp(struct memcg_vmstats_percpu,
3989 GFP_KERNEL_ACCOUNT);
3990 if (!memcg->vmstats_percpu)
3991 goto fail;
3992
3993 if (!memcg1_alloc_events(memcg))
3994 goto fail;
3995
3996 for_each_possible_cpu(cpu) {
3997 if (parent)
3998 pstatc_pcpu = parent->vmstats_percpu;
3999 statc = per_cpu_ptr(memcg->vmstats_percpu, cpu);
4000 statc->parent_pcpu = parent ? pstatc_pcpu : NULL;
4001 statc->vmstats = memcg->vmstats;
4002 }
4003
4004 for_each_node(node)
4005 if (!alloc_mem_cgroup_per_node_info(memcg, node))
4006 goto fail;
4007
4008 if (memcg_wb_domain_init(memcg, GFP_KERNEL))
4009 goto fail;
4010
4011 INIT_WORK(&memcg->high_work, high_work_func);
4012 vmpressure_init(&memcg->vmpressure);
4013 INIT_LIST_HEAD(&memcg->memory_peaks);
4014 INIT_LIST_HEAD(&memcg->swap_peaks);
4015 spin_lock_init(&memcg->peaks_lock);
4016 memcg->socket_pressure = get_jiffies_64();
4017 #if BITS_PER_LONG < 64
4018 seqlock_init(&memcg->socket_pressure_seqlock);
4019 #endif
4020 memcg1_memcg_init(memcg);
4021 memcg->kmemcg_id = -1;
4022 #ifdef CONFIG_CGROUP_WRITEBACK
4023 INIT_LIST_HEAD(&memcg->cgwb_list);
4024 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++)
4025 memcg->cgwb_frn[i].done =
4026 __WB_COMPLETION_INIT(&memcg_cgwb_frn_waitq);
4027 #endif
4028 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
4029 spin_lock_init(&memcg->deferred_split_queue.split_queue_lock);
4030 INIT_LIST_HEAD(&memcg->deferred_split_queue.split_queue);
4031 memcg->deferred_split_queue.split_queue_len = 0;
4032 #endif
4033 lru_gen_init_memcg(memcg);
4034 return memcg;
4035 fail:
4036 mem_cgroup_private_id_remove(memcg);
4037 __mem_cgroup_free(memcg);
4038 return ERR_PTR(error);
4039 }
4040
4041 static struct cgroup_subsys_state * __ref
4042 mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
4043 {
4044 struct mem_cgroup *parent = mem_cgroup_from_css(parent_css);
4045 struct mem_cgroup *memcg, *old_memcg;
4046 bool memcg_on_dfl = cgroup_subsys_on_dfl(memory_cgrp_subsys);
4047
4048 old_memcg = set_active_memcg(parent);
4049 memcg = mem_cgroup_alloc(parent);
4050 set_active_memcg(old_memcg);
4051 if (IS_ERR(memcg))
4052 return ERR_CAST(memcg);
4053
4054 page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX);
4055 memcg1_soft_limit_reset(memcg);
4056 #ifdef CONFIG_ZSWAP
4057 memcg->zswap_max = PAGE_COUNTER_MAX;
4058 WRITE_ONCE(memcg->zswap_writeback, true);
4059 #endif
4060 page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX);
4061 if (parent) {
4062 WRITE_ONCE(memcg->swappiness, mem_cgroup_swappiness(parent));
4063
4064 page_counter_init(&memcg->memory, &parent->memory, memcg_on_dfl);
4065 page_counter_init(&memcg->swap, &parent->swap, false);
4066 #ifdef CONFIG_MEMCG_V1
4067 memcg->memory.track_failcnt = !memcg_on_dfl;
4068 WRITE_ONCE(memcg->oom_kill_disable, READ_ONCE(parent->oom_kill_disable));
4069 page_counter_init(&memcg->kmem, &parent->kmem, false);
4070 page_counter_init(&memcg->tcpmem, &parent->tcpmem, false);
4071 #endif
4072 } else {
4073 init_memcg_stats();
4074 init_memcg_events();
4075 page_counter_init(&memcg->memory, NULL, true);
4076 page_counter_init(&memcg->swap, NULL, false);
4077 #ifdef CONFIG_MEMCG_V1
4078 page_counter_init(&memcg->kmem, NULL, false);
4079 page_counter_init(&memcg->tcpmem, NULL, false);
4080 #endif
4081 root_mem_cgroup = memcg;
4082 return &memcg->css;
4083 }
4084
4085 if (memcg_on_dfl && !cgroup_memory_nosocket)
4086 static_branch_inc(&memcg_sockets_enabled_key);
4087
4088 if (!cgroup_memory_nobpf)
4089 static_branch_inc(&memcg_bpf_enabled_key);
4090
4091 return &memcg->css;
4092 }
4093
4094 static int mem_cgroup_css_online(struct cgroup_subsys_state *css)
4095 {
4096 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4097 struct obj_cgroup *objcg;
4098 int nid;
4099
4100 memcg_online_kmem(memcg);
4101
4102 /*
4103 * A memcg must be visible for expand_shrinker_info()
4104 * by the time the maps are allocated. So, we allocate maps
4105 * here, when for_each_mem_cgroup() can't skip it.
4106 */
4107 if (alloc_shrinker_info(memcg))
4108 goto offline_kmem;
4109
4110 for_each_node(nid) {
4111 objcg = obj_cgroup_alloc();
4112 if (!objcg)
4113 goto free_objcg;
4114
4115 if (unlikely(mem_cgroup_is_root(memcg)))
4116 objcg->is_root = true;
4117
4118 objcg->memcg = memcg;
4119 rcu_assign_pointer(memcg->nodeinfo[nid]->objcg, objcg);
4120 obj_cgroup_get(objcg);
4121 memcg->nodeinfo[nid]->orig_objcg = objcg;
4122 }
4123
4124 if (unlikely(mem_cgroup_is_root(memcg)) && !mem_cgroup_disabled())
4125 queue_delayed_work(system_dfl_wq, &stats_flush_dwork,
4126 FLUSH_TIME);
4127 lru_gen_online_memcg(memcg);
4128
4129 /* Online state pins memcg ID, memcg ID pins CSS */
4130 refcount_set(&memcg->id.ref, 1);
4131 css_get(css);
4132
4133 /*
4134 * Ensure mem_cgroup_from_private_id() works once we're fully online.
4135 *
4136 * We could do this earlier and require callers to filter with
4137 * css_tryget_online(). But right now there are no users that
4138 * need earlier access, and the workingset code relies on the
4139 * cgroup tree linkage (mem_cgroup_get_nr_swap_pages()). So
4140 * publish it here at the end of onlining. This matches the
4141 * regular ID destruction during offlining.
4142 */
4143 xa_store(&mem_cgroup_private_ids, memcg->id.id, memcg, GFP_KERNEL);
4144
4145 return 0;
4146 free_objcg:
4147 for_each_node(nid) {
4148 struct mem_cgroup_per_node *pn = memcg->nodeinfo[nid];
4149
4150 objcg = rcu_replace_pointer(pn->objcg, NULL, true);
4151 if (objcg)
4152 percpu_ref_kill(&objcg->refcnt);
4153
4154 if (pn->orig_objcg) {
4155 obj_cgroup_put(pn->orig_objcg);
4156 /*
4157 * Reset pn->orig_objcg to NULL to prevent
4158 * obj_cgroup_put() from being called again in
4159 * __mem_cgroup_free().
4160 */
4161 pn->orig_objcg = NULL;
4162 }
4163 }
4164 free_shrinker_info(memcg);
4165 offline_kmem:
4166 memcg_offline_kmem(memcg);
4167 mem_cgroup_private_id_remove(memcg);
4168 return -ENOMEM;
4169 }
4170
4171 static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
4172 {
4173 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4174
4175 memcg1_css_offline(memcg);
4176
4177 page_counter_set_min(&memcg->memory, 0);
4178 page_counter_set_low(&memcg->memory, 0);
4179
4180 zswap_memcg_offline_cleanup(memcg);
4181
4182 memcg_offline_kmem(memcg);
4183 reparent_deferred_split_queue(memcg);
4184 /*
4185 * The reparenting of objcg must be after the reparenting of the
4186 * list_lru and deferred_split_queue above, which ensures that they will
4187 * not mistakenly get the parent list_lru and deferred_split_queue.
4188 */
4189 memcg_reparent_objcgs(memcg);
4190 reparent_shrinker_deferred(memcg);
4191 wb_memcg_offline(memcg);
4192 lru_gen_offline_memcg(memcg);
4193
4194 drain_all_stock(memcg);
4195
4196 mem_cgroup_private_id_put(memcg, 1);
4197 }
4198
4199 static void mem_cgroup_css_released(struct cgroup_subsys_state *css)
4200 {
4201 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4202
4203 invalidate_reclaim_iterators(memcg);
4204 lru_gen_release_memcg(memcg);
4205 }
4206
4207 static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
4208 {
4209 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4210 int __maybe_unused i;
4211
4212 #ifdef CONFIG_CGROUP_WRITEBACK
4213 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++)
4214 wb_wait_for_completion(&memcg->cgwb_frn[i].done);
4215 #endif
4216 if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
4217 static_branch_dec(&memcg_sockets_enabled_key);
4218
4219 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg1_tcpmem_active(memcg))
4220 static_branch_dec(&memcg_sockets_enabled_key);
4221
4222 if (!cgroup_memory_nobpf)
4223 static_branch_dec(&memcg_bpf_enabled_key);
4224
4225 vmpressure_cleanup(&memcg->vmpressure);
4226 cancel_work_sync(&memcg->high_work);
4227 memcg1_remove_from_trees(memcg);
4228 free_shrinker_info(memcg);
4229 mem_cgroup_free(memcg);
4230 }
4231
4232 /**
4233 * mem_cgroup_css_reset - reset the states of a mem_cgroup
4234 * @css: the target css
4235 *
4236 * Reset the states of the mem_cgroup associated with @css. This is
4237 * invoked when the userland requests disabling on the default hierarchy
4238 * but the memcg is pinned through dependency. The memcg should stop
4239 * applying policies and should revert to the vanilla state as it may be
4240 * made visible again.
4241 *
4242 * The current implementation only resets the essential configurations.
4243 * This needs to be expanded to cover all the visible parts.
4244 */
4245 static void mem_cgroup_css_reset(struct cgroup_subsys_state *css)
4246 {
4247 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4248
4249 page_counter_set_max(&memcg->memory, PAGE_COUNTER_MAX);
4250 page_counter_set_max(&memcg->swap, PAGE_COUNTER_MAX);
4251 #ifdef CONFIG_MEMCG_V1
4252 page_counter_set_max(&memcg->kmem, PAGE_COUNTER_MAX);
4253 page_counter_set_max(&memcg->tcpmem, PAGE_COUNTER_MAX);
4254 #endif
4255 page_counter_set_min(&memcg->memory, 0);
4256 page_counter_set_low(&memcg->memory, 0);
4257 page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX);
4258 memcg1_soft_limit_reset(memcg);
4259 page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX);
4260 memcg_wb_domain_size_changed(memcg);
4261 }
4262
4263 struct aggregate_control {
4264 /* pointer to the aggregated (CPU and subtree aggregated) counters */
4265 long *aggregate;
4266 /* pointer to the non-hierarchical (CPU aggregated) counters */
4267 long *local;
4268 /* pointer to the pending child counters during tree propagation */
4269 long *pending;
4270 /* pointer to the parent's pending counters, could be NULL */
4271 long *ppending;
4272 /* pointer to the percpu counters to be aggregated */
4273 long *cstat;
4274 /* pointer to the percpu counters of the last aggregation */
4275 long *cstat_prev;
4276 /* size of the above counters */
4277 int size;
4278 };
4279
4280 static void mem_cgroup_stat_aggregate(struct aggregate_control *ac)
4281 {
4282 int i;
4283 long delta, delta_cpu, v;
4284
4285 for (i = 0; i < ac->size; i++) {
4286 /*
4287 * Collect the aggregated propagation counts of groups
4288 * below us. We're in a per-cpu loop here and this is
4289 * a global counter, so the first cycle will get them.
4290 */
4291 delta = ac->pending[i];
4292 if (delta)
4293 ac->pending[i] = 0;
4294
4295 /* Add CPU changes on this level since the last flush */
4296 delta_cpu = 0;
4297 v = READ_ONCE(ac->cstat[i]);
4298 if (v != ac->cstat_prev[i]) {
4299 delta_cpu = v - ac->cstat_prev[i];
4300 delta += delta_cpu;
4301 ac->cstat_prev[i] = v;
4302 }
4303
4304 /* Aggregate counts on this level and propagate upwards */
4305 if (delta_cpu)
4306 ac->local[i] += delta_cpu;
4307
4308 if (delta) {
4309 ac->aggregate[i] += delta;
4310 if (ac->ppending)
4311 ac->ppending[i] += delta;
4312 }
4313 }
4314 }
4315
4316 #ifdef CONFIG_MEMCG_NMI_SAFETY_REQUIRES_ATOMIC
4317 static void flush_nmi_stats(struct mem_cgroup *memcg, struct mem_cgroup *parent,
4318 int cpu)
4319 {
4320 int nid;
4321
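	/*
	 * Updates made from NMI context bypass the per-CPU stat caches
	 * and accumulate in dedicated atomics; fold them into the regular
	 * stats tree before the per-CPU aggregation runs.
	 */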
4322 if (atomic_read(&memcg->kmem_stat)) {
4323 int kmem = atomic_xchg(&memcg->kmem_stat, 0);
4324 int index = memcg_stats_index(MEMCG_KMEM);
4325
4326 memcg->vmstats->state[index] += kmem;
4327 if (parent)
4328 parent->vmstats->state_pending[index] += kmem;
4329 }
4330
4331 for_each_node_state(nid, N_MEMORY) {
4332 struct mem_cgroup_per_node *pn = memcg->nodeinfo[nid];
4333 struct lruvec_stats *lstats = pn->lruvec_stats;
4334 struct lruvec_stats *plstats = NULL;
4335
4336 if (parent)
4337 plstats = parent->nodeinfo[nid]->lruvec_stats;
4338
4339 if (atomic_read(&pn->slab_reclaimable)) {
4340 int slab = atomic_xchg(&pn->slab_reclaimable, 0);
4341 int index = memcg_stats_index(NR_SLAB_RECLAIMABLE_B);
4342
4343 lstats->state[index] += slab;
4344 if (plstats)
4345 plstats->state_pending[index] += slab;
4346 }
4347 if (atomic_read(&pn->slab_unreclaimable)) {
4348 int slab = atomic_xchg(&pn->slab_unreclaimable, 0);
4349 int index = memcg_stats_index(NR_SLAB_UNRECLAIMABLE_B);
4350
4351 lstats->state[index] += slab;
4352 if (plstats)
4353 plstats->state_pending[index] += slab;
4354 }
4355 }
4356 }
4357 #else
4358 static void flush_nmi_stats(struct mem_cgroup *memcg, struct mem_cgroup *parent,
4359 int cpu)
4360 {}
4361 #endif
4362
4363 static void mem_cgroup_css_rstat_flush(struct cgroup_subsys_state *css, int cpu)
4364 {
4365 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4366 struct mem_cgroup *parent = parent_mem_cgroup(memcg);
4367 struct memcg_vmstats_percpu *statc;
4368 struct aggregate_control ac;
4369 int nid;
4370
4371 flush_nmi_stats(memcg, parent, cpu);
4372
4373 statc = per_cpu_ptr(memcg->vmstats_percpu, cpu);
4374
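	/*
	 * Three aggregation passes follow: memcg-wide state, memcg-wide
	 * events, then per-node lruvec state. Each pass folds this CPU's
	 * deltas and the children's pending deltas into this level and
	 * forwards the result to the parent's pending counters.
	 */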
4375 ac = (struct aggregate_control) {
4376 .aggregate = memcg->vmstats->state,
4377 .local = memcg->vmstats->state_local,
4378 .pending = memcg->vmstats->state_pending,
4379 .ppending = parent ? parent->vmstats->state_pending : NULL,
4380 .cstat = statc->state,
4381 .cstat_prev = statc->state_prev,
4382 .size = MEMCG_VMSTAT_SIZE,
4383 };
4384 mem_cgroup_stat_aggregate(&ac);
4385
4386 ac = (struct aggregate_control) {
4387 .aggregate = memcg->vmstats->events,
4388 .local = memcg->vmstats->events_local,
4389 .pending = memcg->vmstats->events_pending,
4390 .ppending = parent ? parent->vmstats->events_pending : NULL,
4391 .cstat = statc->events,
4392 .cstat_prev = statc->events_prev,
4393 .size = NR_MEMCG_EVENTS,
4394 };
4395 mem_cgroup_stat_aggregate(&ac);
4396
4397 for_each_node_state(nid, N_MEMORY) {
4398 struct mem_cgroup_per_node *pn = memcg->nodeinfo[nid];
4399 struct lruvec_stats *lstats = pn->lruvec_stats;
4400 struct lruvec_stats *plstats = NULL;
4401 struct lruvec_stats_percpu *lstatc;
4402
4403 if (parent)
4404 plstats = parent->nodeinfo[nid]->lruvec_stats;
4405
4406 lstatc = per_cpu_ptr(pn->lruvec_stats_percpu, cpu);
4407
4408 ac = (struct aggregate_control) {
4409 .aggregate = lstats->state,
4410 .local = lstats->state_local,
4411 .pending = lstats->state_pending,
4412 .ppending = plstats ? plstats->state_pending : NULL,
4413 .cstat = lstatc->state,
4414 .cstat_prev = lstatc->state_prev,
4415 .size = NR_MEMCG_NODE_STAT_ITEMS,
4416 };
4417 mem_cgroup_stat_aggregate(&ac);
4418
4419 }
4420 WRITE_ONCE(statc->stats_updates, 0);
4421 /* We are in a per-cpu loop here, only do the atomic write once */
4422 if (atomic_long_read(&memcg->vmstats->stats_updates))
4423 atomic_long_set(&memcg->vmstats->stats_updates, 0);
4424 }
4425
4426 static void mem_cgroup_fork(struct task_struct *task)
4427 {
4428 /*
4429 * Set the update flag to cause task->objcg to be initialized lazily
4430 * on the first allocation. It can be done without any synchronization
4431 * because it's always performed on the current task, as does
4432 * current_objcg_update().
4433 */
4434 task->objcg = (struct obj_cgroup *)CURRENT_OBJCG_UPDATE_FLAG;
4435 }
4436
4437 static void mem_cgroup_exit(struct task_struct *task)
4438 {
4439 struct obj_cgroup *objcg = task->objcg;
4440
4441 objcg = (struct obj_cgroup *)
4442 ((unsigned long)objcg & ~CURRENT_OBJCG_UPDATE_FLAG);
4443 obj_cgroup_put(objcg);
4444
4445 /*
4446 * Some kernel allocations can happen after this point,
4447 * but let's ignore them. It can be done without any synchronization
4448 * because it's always performed on the current task, as does
4449 * current_objcg_update().
4450 */
4451 task->objcg = NULL;
4452 }
4453
4454 #ifdef CONFIG_LRU_GEN
4455 static void mem_cgroup_lru_gen_attach(struct cgroup_taskset *tset)
4456 {
4457 struct task_struct *task;
4458 struct cgroup_subsys_state *css;
4459
4460 /* find the first leader if there is any */
4461 cgroup_taskset_for_each_leader(task, css, tset)
4462 break;
4463
4464 if (!task)
4465 return;
4466
4467 task_lock(task);
4468 if (task->mm && READ_ONCE(task->mm->owner) == task)
4469 lru_gen_migrate_mm(task->mm);
4470 task_unlock(task);
4471 }
4472 #else
4473 static void mem_cgroup_lru_gen_attach(struct cgroup_taskset *tset) {}
4474 #endif /* CONFIG_LRU_GEN */
4475
4476 static void mem_cgroup_kmem_attach(struct cgroup_taskset *tset)
4477 {
4478 struct task_struct *task;
4479 struct cgroup_subsys_state *css;
4480
4481 cgroup_taskset_for_each(task, css, tset) {
4482 /* atomically set the update bit */
4483 set_bit(CURRENT_OBJCG_UPDATE_BIT, (unsigned long *)&task->objcg);
4484 }
4485 }
4486
4487 static void mem_cgroup_attach(struct cgroup_taskset *tset)
4488 {
4489 mem_cgroup_lru_gen_attach(tset);
4490 mem_cgroup_kmem_attach(tset);
4491 }
4492
4493 static int seq_puts_memcg_tunable(struct seq_file *m, unsigned long value)
4494 {
4495 if (value == PAGE_COUNTER_MAX)
4496 seq_puts(m, "max\n");
4497 else
4498 seq_printf(m, "%llu\n", (u64)value * PAGE_SIZE);
4499
4500 return 0;
4501 }
4502
4503 static u64 memory_current_read(struct cgroup_subsys_state *css,
4504 struct cftype *cft)
4505 {
4506 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4507
4508 return (u64)page_counter_read(&memcg->memory) * PAGE_SIZE;
4509 }
4510
4511 #define OFP_PEAK_UNSET (((-1UL)))
4512
4513 static int peak_show(struct seq_file *sf, void *v, struct page_counter *pc)
4514 {
4515 struct cgroup_of_peak *ofp = of_peak(sf->private);
4516 u64 fd_peak = READ_ONCE(ofp->value), peak;
4517
4518 /* User wants global or local peak? */
4519 if (fd_peak == OFP_PEAK_UNSET)
4520 peak = pc->watermark;
4521 else
4522 peak = max(fd_peak, READ_ONCE(pc->local_watermark));
4523
4524 seq_printf(sf, "%llu\n", peak * PAGE_SIZE);
4525 return 0;
4526 }
4527
4528 static int memory_peak_show(struct seq_file *sf, void *v)
4529 {
4530 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(sf));
4531
4532 return peak_show(sf, v, &memcg->memory);
4533 }
4534
4535 static int peak_open(struct kernfs_open_file *of)
4536 {
4537 struct cgroup_of_peak *ofp = of_peak(of);
4538
4539 ofp->value = OFP_PEAK_UNSET;
4540 return 0;
4541 }
4542
4543 static void peak_release(struct kernfs_open_file *of)
4544 {
4545 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4546 struct cgroup_of_peak *ofp = of_peak(of);
4547
4548 if (ofp->value == OFP_PEAK_UNSET) {
4549 /* fast path (no writes on this fd) */
4550 return;
4551 }
4552 spin_lock(&memcg->peaks_lock);
4553 list_del(&ofp->list);
4554 spin_unlock(&memcg->peaks_lock);
4555 }
4556
4557 static ssize_t peak_write(struct kernfs_open_file *of, char *buf, size_t nbytes,
4558 loff_t off, struct page_counter *pc,
4559 struct list_head *watchers)
4560 {
4561 unsigned long usage;
4562 struct cgroup_of_peak *peer_ctx;
4563 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4564 struct cgroup_of_peak *ofp = of_peak(of);
4565
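	/*
	 * Writing resets pc->local_watermark to the current usage and
	 * raises every other registered watcher's floor to at least that
	 * usage, so their fd-local peaks stay correct across this reset.
	 * The first write on an fd also registers it as a watcher.
	 */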
4566 spin_lock(&memcg->peaks_lock);
4567
4568 usage = page_counter_read(pc);
4569 WRITE_ONCE(pc->local_watermark, usage);
4570
4571 list_for_each_entry(peer_ctx, watchers, list)
4572 if (usage > peer_ctx->value)
4573 WRITE_ONCE(peer_ctx->value, usage);
4574
4575 /* initial write, register watcher */
4576 if (ofp->value == OFP_PEAK_UNSET)
4577 list_add(&ofp->list, watchers);
4578
4579 WRITE_ONCE(ofp->value, usage);
4580 spin_unlock(&memcg->peaks_lock);
4581
4582 return nbytes;
4583 }
4584
4585 static ssize_t memory_peak_write(struct kernfs_open_file *of, char *buf,
4586 size_t nbytes, loff_t off)
4587 {
4588 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4589
4590 return peak_write(of, buf, nbytes, off, &memcg->memory,
4591 &memcg->memory_peaks);
4592 }
4593
4594 #undef OFP_PEAK_UNSET
4595
4596 static int memory_min_show(struct seq_file *m, void *v)
4597 {
4598 return seq_puts_memcg_tunable(m,
4599 READ_ONCE(mem_cgroup_from_seq(m)->memory.min));
4600 }
4601
4602 static ssize_t memory_min_write(struct kernfs_open_file *of,
4603 char *buf, size_t nbytes, loff_t off)
4604 {
4605 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4606 unsigned long min;
4607 int err;
4608
4609 buf = strstrip(buf);
4610 err = page_counter_memparse(buf, "max", &min);
4611 if (err)
4612 return err;
4613
4614 page_counter_set_min(&memcg->memory, min);
4615
4616 return nbytes;
4617 }
4618
4619 static int memory_low_show(struct seq_file *m, void *v)
4620 {
4621 return seq_puts_memcg_tunable(m,
4622 READ_ONCE(mem_cgroup_from_seq(m)->memory.low));
4623 }
4624
4625 static ssize_t memory_low_write(struct kernfs_open_file *of,
4626 char *buf, size_t nbytes, loff_t off)
4627 {
4628 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4629 unsigned long low;
4630 int err;
4631
4632 buf = strstrip(buf);
4633 err = page_counter_memparse(buf, "max", &low);
4634 if (err)
4635 return err;
4636
4637 page_counter_set_low(&memcg->memory, low);
4638
4639 return nbytes;
4640 }
4641
4642 static int memory_high_show(struct seq_file *m, void *v)
4643 {
4644 return seq_puts_memcg_tunable(m,
4645 READ_ONCE(mem_cgroup_from_seq(m)->memory.high));
4646 }
4647
4648 static ssize_t memory_high_write(struct kernfs_open_file *of,
4649 char *buf, size_t nbytes, loff_t off)
4650 {
4651 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4652 unsigned int nr_retries = MAX_RECLAIM_RETRIES;
4653 bool drained = false;
4654 unsigned long high;
4655 int err;
4656
4657 buf = strstrip(buf);
4658 err = page_counter_memparse(buf, "max", &high);
4659 if (err)
4660 return err;
4661
4662 page_counter_set_high(&memcg->memory, high);
4663
4664 if (of->file->f_flags & O_NONBLOCK)
4665 goto out;
4666
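	/*
	 * Try to bring usage below the new high: drain the per-CPU charge
	 * caches first (cheap), then direct-reclaim up to
	 * MAX_RECLAIM_RETRIES times. A pending signal aborts the loop;
	 * the new high limit itself stays in place either way.
	 */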
4667 for (;;) {
4668 unsigned long nr_pages = page_counter_read(&memcg->memory);
4669 unsigned long reclaimed;
4670
4671 if (nr_pages <= high)
4672 break;
4673
4674 if (signal_pending(current))
4675 break;
4676
4677 if (!drained) {
4678 drain_all_stock(memcg);
4679 drained = true;
4680 continue;
4681 }
4682
4683 reclaimed = try_to_free_mem_cgroup_pages(memcg, nr_pages - high,
4684 GFP_KERNEL, MEMCG_RECLAIM_MAY_SWAP, NULL);
4685
4686 if (!reclaimed && !nr_retries--)
4687 break;
4688 }
4689 out:
4690 memcg_wb_domain_size_changed(memcg);
4691 return nbytes;
4692 }
4693
4694 static int memory_max_show(struct seq_file *m, void *v)
4695 {
4696 return seq_puts_memcg_tunable(m,
4697 READ_ONCE(mem_cgroup_from_seq(m)->memory.max));
4698 }
4699
4700 static ssize_t memory_max_write(struct kernfs_open_file *of,
4701 char *buf, size_t nbytes, loff_t off)
4702 {
4703 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4704 unsigned int nr_reclaims = MAX_RECLAIM_RETRIES;
4705 bool drained = false;
4706 unsigned long max;
4707 int err;
4708
4709 buf = strstrip(buf);
4710 err = page_counter_memparse(buf, "max", &max);
4711 if (err)
4712 return err;
4713
4714 xchg(&memcg->memory.max, max);
4715
4716 if (of->file->f_flags & O_NONBLOCK)
4717 goto out;
4718
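	/*
	 * Unlike memory.high, memory.max is a hard limit: once the
	 * reclaim retries are used up, invoke the OOM killer until usage
	 * fits or no more victims can be found.
	 */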
4719 for (;;) {
4720 unsigned long nr_pages = page_counter_read(&memcg->memory);
4721
4722 if (nr_pages <= max)
4723 break;
4724
4725 if (signal_pending(current))
4726 break;
4727
4728 if (!drained) {
4729 drain_all_stock(memcg);
4730 drained = true;
4731 continue;
4732 }
4733
4734 if (nr_reclaims) {
4735 if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max,
4736 GFP_KERNEL, MEMCG_RECLAIM_MAY_SWAP, NULL))
4737 nr_reclaims--;
4738 continue;
4739 }
4740
4741 memcg_memory_event(memcg, MEMCG_OOM);
4742 if (!mem_cgroup_out_of_memory(memcg, GFP_KERNEL, 0))
4743 break;
4744 cond_resched();
4745 }
4746 out:
4747 memcg_wb_domain_size_changed(memcg);
4748 return nbytes;
4749 }
4750
4751 /*
4752 * Note: don't forget to update the 'samples/cgroup/memcg_event_listener'
4753 * if any new events become available.
4754 */
4755 static void __memory_events_show(struct seq_file *m, atomic_long_t *events)
4756 {
4757 seq_printf(m, "low %lu\n", atomic_long_read(&events[MEMCG_LOW]));
4758 seq_printf(m, "high %lu\n", atomic_long_read(&events[MEMCG_HIGH]));
4759 seq_printf(m, "max %lu\n", atomic_long_read(&events[MEMCG_MAX]));
4760 seq_printf(m, "oom %lu\n", atomic_long_read(&events[MEMCG_OOM]));
4761 seq_printf(m, "oom_kill %lu\n",
4762 atomic_long_read(&events[MEMCG_OOM_KILL]));
4763 seq_printf(m, "oom_group_kill %lu\n",
4764 atomic_long_read(&events[MEMCG_OOM_GROUP_KILL]));
4765 seq_printf(m, "sock_throttled %lu\n",
4766 atomic_long_read(&events[MEMCG_SOCK_THROTTLED]));
4767 }
4768
4769 static int memory_events_show(struct seq_file *m, void *v)
4770 {
4771 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
4772
4773 __memory_events_show(m, memcg->memory_events);
4774 return 0;
4775 }
4776
4777 static int memory_events_local_show(struct seq_file *m, void *v)
4778 {
4779 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
4780
4781 __memory_events_show(m, memcg->memory_events_local);
4782 return 0;
4783 }
4784
4785 int memory_stat_show(struct seq_file *m, void *v)
4786 {
4787 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
4788 char *buf = kmalloc(SEQ_BUF_SIZE, GFP_KERNEL);
4789 struct seq_buf s;
4790
4791 if (!buf)
4792 return -ENOMEM;
4793 seq_buf_init(&s, buf, SEQ_BUF_SIZE);
4794 memory_stat_format(memcg, &s);
4795 seq_puts(m, buf);
4796 kfree(buf);
4797 return 0;
4798 }
4799
4800 #ifdef CONFIG_NUMA
4801 static inline unsigned long lruvec_page_state_output(struct lruvec *lruvec,
4802 int item)
4803 {
4804 return lruvec_page_state(lruvec, item) *
4805 memcg_page_state_output_unit(item);
4806 }
4807
4808 static int memory_numa_stat_show(struct seq_file *m, void *v)
4809 {
4810 int i;
4811 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
4812
4813 mem_cgroup_flush_stats(memcg);
4814
4815 for (i = 0; i < ARRAY_SIZE(memory_stats); i++) {
4816 int nid;
4817
4818 if (memory_stats[i].idx >= NR_VM_NODE_STAT_ITEMS)
4819 continue;
4820
4821 seq_printf(m, "%s", memory_stats[i].name);
4822 for_each_node_state(nid, N_MEMORY) {
4823 u64 size;
4824 struct lruvec *lruvec;
4825
4826 lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
4827 size = lruvec_page_state_output(lruvec,
4828 memory_stats[i].idx);
4829 seq_printf(m, " N%d=%llu", nid, size);
4830 }
4831 seq_putc(m, '\n');
4832 }
4833
4834 return 0;
4835 }
4836 #endif
4837
4838 static int memory_oom_group_show(struct seq_file *m, void *v)
4839 {
4840 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
4841
4842 seq_printf(m, "%d\n", READ_ONCE(memcg->oom_group));
4843
4844 return 0;
4845 }
4846
4847 static ssize_t memory_oom_group_write(struct kernfs_open_file *of,
4848 char *buf, size_t nbytes, loff_t off)
4849 {
4850 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4851 int ret, oom_group;
4852
4853 buf = strstrip(buf);
4854 if (!buf)
4855 return -EINVAL;
4856
4857 ret = kstrtoint(buf, 0, &oom_group);
4858 if (ret)
4859 return ret;
4860
4861 if (oom_group != 0 && oom_group != 1)
4862 return -EINVAL;
4863
4864 WRITE_ONCE(memcg->oom_group, oom_group);
4865
4866 return nbytes;
4867 }
4868
4869 static ssize_t memory_reclaim(struct kernfs_open_file *of, char *buf,
4870 size_t nbytes, loff_t off)
4871 {
4872 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4873 int ret;
4874
4875 ret = user_proactive_reclaim(buf, memcg, NULL);
4876 if (ret)
4877 return ret;
4878
4879 return nbytes;
4880 }
4881
4882 static struct cftype memory_files[] = {
4883 {
4884 .name = "current",
4885 .flags = CFTYPE_NOT_ON_ROOT,
4886 .read_u64 = memory_current_read,
4887 },
4888 {
4889 .name = "peak",
4890 .flags = CFTYPE_NOT_ON_ROOT,
4891 .open = peak_open,
4892 .release = peak_release,
4893 .seq_show = memory_peak_show,
4894 .write = memory_peak_write,
4895 },
4896 {
4897 .name = "min",
4898 .flags = CFTYPE_NOT_ON_ROOT,
4899 .seq_show = memory_min_show,
4900 .write = memory_min_write,
4901 },
4902 {
4903 .name = "low",
4904 .flags = CFTYPE_NOT_ON_ROOT,
4905 .seq_show = memory_low_show,
4906 .write = memory_low_write,
4907 },
4908 {
4909 .name = "high",
4910 .flags = CFTYPE_NOT_ON_ROOT,
4911 .seq_show = memory_high_show,
4912 .write = memory_high_write,
4913 },
4914 {
4915 .name = "max",
4916 .flags = CFTYPE_NOT_ON_ROOT,
4917 .seq_show = memory_max_show,
4918 .write = memory_max_write,
4919 },
4920 {
4921 .name = "events",
4922 .flags = CFTYPE_NOT_ON_ROOT,
4923 .file_offset = offsetof(struct mem_cgroup, events_file),
4924 .seq_show = memory_events_show,
4925 },
4926 {
4927 .name = "events.local",
4928 .flags = CFTYPE_NOT_ON_ROOT,
4929 .file_offset = offsetof(struct mem_cgroup, events_local_file),
4930 .seq_show = memory_events_local_show,
4931 },
4932 {
4933 .name = "stat",
4934 .seq_show = memory_stat_show,
4935 },
4936 #ifdef CONFIG_NUMA
4937 {
4938 .name = "numa_stat",
4939 .seq_show = memory_numa_stat_show,
4940 },
4941 #endif
4942 {
4943 .name = "oom.group",
4944 .flags = CFTYPE_NOT_ON_ROOT | CFTYPE_NS_DELEGATABLE,
4945 .seq_show = memory_oom_group_show,
4946 .write = memory_oom_group_write,
4947 },
4948 {
4949 .name = "reclaim",
4950 .flags = CFTYPE_NS_DELEGATABLE,
4951 .write = memory_reclaim,
4952 },
4953 { } /* terminate */
4954 };
4955
4956 struct cgroup_subsys memory_cgrp_subsys = {
4957 .css_alloc = mem_cgroup_css_alloc,
4958 .css_online = mem_cgroup_css_online,
4959 .css_offline = mem_cgroup_css_offline,
4960 .css_released = mem_cgroup_css_released,
4961 .css_free = mem_cgroup_css_free,
4962 .css_reset = mem_cgroup_css_reset,
4963 .css_rstat_flush = mem_cgroup_css_rstat_flush,
4964 .attach = mem_cgroup_attach,
4965 .fork = mem_cgroup_fork,
4966 .exit = mem_cgroup_exit,
4967 .dfl_cftypes = memory_files,
4968 #ifdef CONFIG_MEMCG_V1
4969 .legacy_cftypes = mem_cgroup_legacy_files,
4970 #endif
4971 .early_init = 0,
4972 };
4973
4974 /**
4975 * mem_cgroup_calculate_protection - check if memory consumption is in the normal range
4976 * @root: the top ancestor of the sub-tree being checked
4977 * @memcg: the memory cgroup to check
4978 *
4979 * WARNING: This function is not stateless! It can only be used as part
4980 * of a top-down tree iteration, not for isolated queries.
4981 */
4982 void mem_cgroup_calculate_protection(struct mem_cgroup *root,
4983 struct mem_cgroup *memcg)
4984 {
4985 bool recursive_protection =
4986 cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_RECURSIVE_PROT;
4987
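	/*
	 * With the "memory_recursiveprot" mount option, children inherit
	 * unused min/low protection from their parent instead of
	 * requiring explicit settings at every level.
	 */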
4988 if (mem_cgroup_disabled())
4989 return;
4990
4991 if (!root)
4992 root = root_mem_cgroup;
4993
4994 page_counter_calculate_protection(&root->memory, &memcg->memory, recursive_protection);
4995 }
4996
4997 static int charge_memcg(struct folio *folio, struct mem_cgroup *memcg,
4998 gfp_t gfp)
4999 {
5000 int ret = 0;
5001 struct obj_cgroup *objcg;
5002
5003 objcg = get_obj_cgroup_from_memcg(memcg);
5004 /* Do not account at the root objcg level. */
5005 if (!obj_cgroup_is_root(objcg))
5006 ret = try_charge_memcg(memcg, gfp, folio_nr_pages(folio));
5007 if (ret) {
5008 obj_cgroup_put(objcg);
5009 return ret;
5010 }
5011 commit_charge(folio, objcg);
5012 memcg1_commit_charge(folio, memcg);
5013
5014 return ret;
5015 }
5016
5017 int __mem_cgroup_charge(struct folio *folio, struct mm_struct *mm, gfp_t gfp)
5018 {
5019 struct mem_cgroup *memcg;
5020 int ret;
5021
5022 memcg = get_mem_cgroup_from_mm(mm);
5023 ret = charge_memcg(folio, memcg, gfp);
5024 css_put(&memcg->css);
5025
5026 return ret;
5027 }
5028
5029 /**
5030 * mem_cgroup_charge_hugetlb - charge the memcg for a hugetlb folio
5031 * @folio: folio being charged
5032 * @gfp: reclaim mode
5033 *
5034 * This function is called when allocating a huge page folio, after the page has
5035 * already been obtained and charged to the appropriate hugetlb cgroup
5036 * controller (if it is enabled).
5037 *
5038 * Returns -ENOMEM if the memcg is already full.
5039 * Returns 0 if the charge was successful, or if charging was skipped.
5040 */
5041 int mem_cgroup_charge_hugetlb(struct folio *folio, gfp_t gfp)
5042 {
5043 struct mem_cgroup *memcg = get_mem_cgroup_from_current();
5044 int ret = 0;
5045
5046 /*
5047 * Even if memcg does not account for hugetlb, we still want to update
5048 * system-level stats via lruvec_stat_mod_folio. Return 0 and skip
5049 * charging the memcg.
5050 */
5051 if (mem_cgroup_disabled() || !memcg_accounts_hugetlb() ||
5052 !memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
5053 goto out;
5054
5055 if (charge_memcg(folio, memcg, gfp))
5056 ret = -ENOMEM;
5057
5058 out:
5059 mem_cgroup_put(memcg);
5060 return ret;
5061 }
5062
5063 /**
5064 * mem_cgroup_swapin_charge_folio - Charge a newly allocated folio for swapin.
5065 * @folio: folio to charge.
5066 * @mm: mm context of the victim
5067 * @gfp: reclaim mode
5068 * @entry: swap entry for which the folio is allocated
5069 *
5070 * This function charges a folio allocated for swapin. Please call this before
5071 * adding the folio to the swapcache.
5072 *
5073 * Returns 0 on success. Otherwise, an error code is returned.
5074 */
5075 int mem_cgroup_swapin_charge_folio(struct folio *folio, struct mm_struct *mm,
5076 gfp_t gfp, swp_entry_t entry)
5077 {
5078 struct mem_cgroup *memcg;
5079 unsigned short id;
5080 int ret;
5081
5082 if (mem_cgroup_disabled())
5083 return 0;
5084
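	/*
	 * Prefer the memcg that owned the page when it was swapped out,
	 * as recorded in the swap cgroup map; fall back to the faulting
	 * mm's memcg if that cgroup is gone or offline.
	 */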
5085 id = lookup_swap_cgroup_id(entry);
5086 rcu_read_lock();
5087 memcg = mem_cgroup_from_private_id(id);
5088 if (!memcg || !css_tryget_online(&memcg->css))
5089 memcg = get_mem_cgroup_from_mm(mm);
5090 rcu_read_unlock();
5091
5092 ret = charge_memcg(folio, memcg, gfp);
5093
5094 css_put(&memcg->css);
5095 return ret;
5096 }
5097
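/*
 * Uncharging is batched: uncharge_folio() gathers consecutive folios that
 * belong to the same objcg, and uncharge_batch() settles the page counters
 * and statistics once per batch rather than once per folio.
 */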
5098 struct uncharge_gather {
5099 struct obj_cgroup *objcg;
5100 unsigned long nr_memory;
5101 unsigned long pgpgout;
5102 unsigned long nr_kmem;
5103 int nid;
5104 };
5105
5106 static inline void uncharge_gather_clear(struct uncharge_gather *ug)
5107 {
5108 memset(ug, 0, sizeof(*ug));
5109 }
5110
5111 static void uncharge_batch(const struct uncharge_gather *ug)
5112 {
5113 struct mem_cgroup *memcg;
5114
5115 rcu_read_lock();
5116 memcg = obj_cgroup_memcg(ug->objcg);
5117 if (ug->nr_memory) {
5118 memcg_uncharge(memcg, ug->nr_memory);
5119 if (ug->nr_kmem) {
5120 mod_memcg_state(memcg, MEMCG_KMEM, -ug->nr_kmem);
5121 memcg1_account_kmem(memcg, -ug->nr_kmem);
5122 }
5123 memcg1_oom_recover(memcg);
5124 }
5125
5126 memcg1_uncharge_batch(memcg, ug->pgpgout, ug->nr_memory, ug->nid);
5127 rcu_read_unlock();
5128
5129 /* drop reference from uncharge_folio */
5130 obj_cgroup_put(ug->objcg);
5131 }
5132
5133 static void uncharge_folio(struct folio *folio, struct uncharge_gather *ug)
5134 {
5135 long nr_pages;
5136 struct obj_cgroup *objcg;
5137
5138 VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
5139
5140 /*
5141 * Nobody should be changing or seriously looking at
5142 * the folio's objcg at this point; we have fully exclusive
5143 * access to the folio.
5144 */
5145 objcg = folio_objcg(folio);
5146 if (!objcg)
5147 return;
5148
5149 if (ug->objcg != objcg) {
5150 if (ug->objcg) {
5151 uncharge_batch(ug);
5152 uncharge_gather_clear(ug);
5153 }
5154 ug->objcg = objcg;
5155 ug->nid = folio_nid(folio);
5156
5157 /* pairs with obj_cgroup_put in uncharge_batch */
5158 obj_cgroup_get(objcg);
5159 }
5160
5161 nr_pages = folio_nr_pages(folio);
5162
5163 if (folio_memcg_kmem(folio)) {
5164 ug->nr_memory += nr_pages;
5165 ug->nr_kmem += nr_pages;
5166 } else {
5167 /* LRU pages aren't accounted at the root level */
5168 if (!obj_cgroup_is_root(objcg))
5169 ug->nr_memory += nr_pages;
5170 ug->pgpgout++;
5171
5172 WARN_ON_ONCE(folio_unqueue_deferred_split(folio));
5173 }
5174
5175 folio->memcg_data = 0;
5176 obj_cgroup_put(objcg);
5177 }
5178
5179 void __mem_cgroup_uncharge(struct folio *folio)
5180 {
5181 struct uncharge_gather ug;
5182
5183 /* Don't touch folio->lru of any random page, pre-check: */
5184 if (!folio_memcg_charged(folio))
5185 return;
5186
5187 uncharge_gather_clear(&ug);
5188 uncharge_folio(folio, &ug);
5189 uncharge_batch(&ug);
5190 }
5191
5192 void __mem_cgroup_uncharge_folios(struct folio_batch *folios)
5193 {
5194 struct uncharge_gather ug;
5195 unsigned int i;
5196
5197 uncharge_gather_clear(&ug);
5198 for (i = 0; i < folios->nr; i++)
5199 uncharge_folio(folios->folios[i], &ug);
5200 if (ug.objcg)
5201 uncharge_batch(&ug);
5202 }
5203
5204 /**
5205 * mem_cgroup_replace_folio - Charge a folio's replacement.
5206 * @old: Currently circulating folio.
5207 * @new: Replacement folio.
5208 *
5209 * Charge @new as a replacement folio for @old. @old will
5210 * be uncharged upon free.
5211 *
5212 * Both folios must be locked, @new->mapping must be set up.
5213 */
5214 void mem_cgroup_replace_folio(struct folio *old, struct folio *new)
5215 {
5216 struct mem_cgroup *memcg;
5217 struct obj_cgroup *objcg;
5218 long nr_pages = folio_nr_pages(new);
5219
5220 VM_BUG_ON_FOLIO(!folio_test_locked(old), old);
5221 VM_BUG_ON_FOLIO(!folio_test_locked(new), new);
5222 VM_BUG_ON_FOLIO(folio_test_anon(old) != folio_test_anon(new), new);
5223 VM_BUG_ON_FOLIO(folio_nr_pages(old) != nr_pages, new);
5224
5225 if (mem_cgroup_disabled())
5226 return;
5227
5228 /* Page cache replacement: new folio already charged? */
5229 if (folio_memcg_charged(new))
5230 return;
5231
5232 objcg = folio_objcg(old);
5233 VM_WARN_ON_ONCE_FOLIO(!objcg, old);
5234 if (!objcg)
5235 return;
5236
5237 rcu_read_lock();
5238 memcg = obj_cgroup_memcg(objcg);
5239 /* Force-charge the new page. The old one will be freed soon */
5240 if (!obj_cgroup_is_root(objcg)) {
5241 page_counter_charge(&memcg->memory, nr_pages);
5242 if (do_memsw_account())
5243 page_counter_charge(&memcg->memsw, nr_pages);
5244 }
5245
5246 obj_cgroup_get(objcg);
5247 commit_charge(new, objcg);
5248 memcg1_commit_charge(new, memcg);
5249 rcu_read_unlock();
5250 }
5251
5252 /**
5253 * mem_cgroup_migrate - Transfer the memcg data from the old to the new folio.
5254 * @old: Currently circulating folio.
5255 * @new: Replacement folio.
5256 *
5257 * Transfer the memcg data from the old folio to the new folio for migration.
5258 * The old folio's data info will be cleared. Note that the memory counters
5259 * will remain unchanged throughout the process.
5260 *
5261 * Both folios must be locked, @new->mapping must be set up.
5262 */
5263 void mem_cgroup_migrate(struct folio *old, struct folio *new)
5264 {
5265 struct obj_cgroup *objcg;
5266
5267 VM_BUG_ON_FOLIO(!folio_test_locked(old), old);
5268 VM_BUG_ON_FOLIO(!folio_test_locked(new), new);
5269 VM_BUG_ON_FOLIO(folio_test_anon(old) != folio_test_anon(new), new);
5270 VM_BUG_ON_FOLIO(folio_nr_pages(old) != folio_nr_pages(new), new);
5271 VM_BUG_ON_FOLIO(folio_test_lru(old), old);
5272
5273 if (mem_cgroup_disabled())
5274 return;
5275
5276 objcg = folio_objcg(old);
5277 /*
5278 * Note that it is normal to see !objcg for a hugetlb folio.
5279 * For example, it could have been allocated when memory_hugetlb_accounting
5280 * was not selected.
5281 */
5282 VM_WARN_ON_ONCE_FOLIO(!folio_test_hugetlb(old) && !objcg, old);
5283 if (!objcg)
5284 return;
5285
5286 /* Transfer the charge and the objcg ref */
5287 commit_charge(new, objcg);
5288
5289 /* The warning should never trigger, so don't worry about a non-zero refcount */
5290 WARN_ON_ONCE(folio_unqueue_deferred_split(old));
5291 old->memcg_data = 0;
5292 }
5293
5294 DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
5295 EXPORT_SYMBOL(memcg_sockets_enabled_key);
5296
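/*
 * Associate a newly allocated socket with the current task's memcg,
 * taking a css reference that mem_cgroup_sk_free() drops again.
 */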
5297 void mem_cgroup_sk_alloc(struct sock *sk)
5298 {
5299 struct mem_cgroup *memcg;
5300
5301 if (!mem_cgroup_sockets_enabled)
5302 return;
5303
5304 /* Do not associate the sock with an unrelated interrupted task's memcg. */
5305 if (!in_task())
5306 return;
5307
5308 rcu_read_lock();
5309 memcg = mem_cgroup_from_task(current);
5310 if (mem_cgroup_is_root(memcg))
5311 goto out;
5312 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg1_tcpmem_active(memcg))
5313 goto out;
5314 if (css_tryget(&memcg->css))
5315 sk->sk_memcg = memcg;
5316 out:
5317 rcu_read_unlock();
5318 }
5319
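/* Drop the css reference taken by mem_cgroup_sk_alloc(). */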
5320 void mem_cgroup_sk_free(struct sock *sk)
5321 {
5322 struct mem_cgroup *memcg = mem_cgroup_from_sk(sk);
5323
5324 if (memcg)
5325 css_put(&memcg->css);
5326 }
5327
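/*
 * Copy @sk's memcg association (and css reference) to @newsk, e.g.
 * from a listening socket to a newly accepted one.
 */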
5328 void mem_cgroup_sk_inherit(const struct sock *sk, struct sock *newsk)
5329 {
5330 struct mem_cgroup *memcg;
5331
5332 if (sk->sk_memcg == newsk->sk_memcg)
5333 return;
5334
5335 mem_cgroup_sk_free(newsk);
5336
5337 memcg = mem_cgroup_from_sk(sk);
5338 if (memcg)
5339 css_get(&memcg->css);
5340
5341 newsk->sk_memcg = sk->sk_memcg;
5342 }
5343
5344 /**
5345 * mem_cgroup_sk_charge - charge socket memory
5346 * @sk: socket in memcg to charge
5347 * @nr_pages: number of pages to charge
5348 * @gfp_mask: reclaim mode
5349 *
5350 * Charges @nr_pages to the memcg of @sk. Returns %true if the charge
5351 * fit within the memcg's configured limit, %false if it doesn't.
5352 */
5353 bool mem_cgroup_sk_charge(const struct sock *sk, unsigned int nr_pages,
5354 gfp_t gfp_mask)
5355 {
5356 struct mem_cgroup *memcg = mem_cgroup_from_sk(sk);
5357
5358 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
5359 return memcg1_charge_skmem(memcg, nr_pages, gfp_mask);
5360
5361 if (try_charge_memcg(memcg, gfp_mask, nr_pages) == 0) {
5362 mod_memcg_state(memcg, MEMCG_SOCK, nr_pages);
5363 return true;
5364 }
5365
5366 return false;
5367 }
5368
5369 /**
5370 * mem_cgroup_sk_uncharge - uncharge socket memory
5371 * @sk: socket in memcg to uncharge
5372 * @nr_pages: number of pages to uncharge
5373 */
5374 void mem_cgroup_sk_uncharge(const struct sock *sk, unsigned int nr_pages)
5375 {
5376 struct mem_cgroup *memcg = mem_cgroup_from_sk(sk);
5377
5378 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
5379 memcg1_uncharge_skmem(memcg, nr_pages);
5380 return;
5381 }
5382
5383 mod_memcg_state(memcg, MEMCG_SOCK, -nr_pages);
5384
5385 refill_stock(memcg, nr_pages);
5386 }
5387
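/* Flush all work items pending on the memcg workqueue. */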
5388 void mem_cgroup_flush_workqueue(void)
5389 {
5390 flush_workqueue(memcg_wq);
5391 }
5392
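/*
 * Parse the "cgroup.memory=" kernel command line option, a
 * comma-separated list of flags, e.g.:
 *
 *	cgroup.memory=nosocket,nokmem
 */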
5393 static int __init cgroup_memory(char *s)
5394 {
5395 char *token;
5396
5397 while ((token = strsep(&s, ",")) != NULL) {
5398 if (!*token)
5399 continue;
5400 if (!strcmp(token, "nosocket"))
5401 cgroup_memory_nosocket = true;
5402 if (!strcmp(token, "nokmem"))
5403 cgroup_memory_nokmem = true;
5404 if (!strcmp(token, "nobpf"))
5405 cgroup_memory_nobpf = true;
5406 }
5407 return 1;
5408 }
5409 __setup("cgroup.memory=", cgroup_memory);
5410
5411 /*
5412 * Memory controller init that runs before cgroup_init() initializes root_mem_cgroup.
5413 *
5414 * Some parts like memcg_hotplug_cpu_dead() have to be initialized from this
5415 * context because of lock dependencies (cgroup_lock -> cpu hotplug) but
5416 * basically everything that doesn't depend on a specific mem_cgroup structure
5417 * should be initialized from here.
5418 */
5419 int __init mem_cgroup_init(void)
5420 {
5421 unsigned int memcg_size;
5422 int cpu;
5423
5424 /*
5425 * Currently an s32 (see struct batched_lruvec_stat) is used for
5426 * per-memcg-per-cpu caching of per-node statistics. For this to
5427 * work, we must make sure that the overfill threshold can't exceed
5428 * S32_MAX / PAGE_SIZE.
5429 */
5430 BUILD_BUG_ON(MEMCG_CHARGE_BATCH > S32_MAX / PAGE_SIZE);
5431
5432 cpuhp_setup_state_nocalls(CPUHP_MM_MEMCQ_DEAD, "mm/memctrl:dead", NULL,
5433 memcg_hotplug_cpu_dead);
5434
5435 memcg_wq = alloc_workqueue("memcg", WQ_PERCPU, 0);
5436 WARN_ON(!memcg_wq);
5437
5438 for_each_possible_cpu(cpu) {
5439 INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work,
5440 drain_local_memcg_stock);
5441 INIT_WORK(&per_cpu_ptr(&obj_stock, cpu)->work,
5442 drain_local_obj_stock);
5443 }
5444
5445 memcg_size = struct_size_t(struct mem_cgroup, nodeinfo, nr_node_ids);
5446 memcg_cachep = kmem_cache_create("mem_cgroup", memcg_size, 0,
5447 SLAB_PANIC | SLAB_HWCACHE_ALIGN, NULL);
5448
5449 memcg_pn_cachep = KMEM_CACHE(mem_cgroup_per_node,
5450 SLAB_PANIC | SLAB_HWCACHE_ALIGN);
5451
5452 return 0;
5453 }
5454
5455 #ifdef CONFIG_SWAP
5456 /**
5457 * __mem_cgroup_try_charge_swap - try charging swap space for a folio
5458 * @folio: folio being added to swap
5459 * @entry: swap entry to charge
5460 *
5461 * Try to charge @folio's memcg for the swap space at @entry.
5462 *
5463 * Returns 0 on success, -ENOMEM on failure.
5464 */
5465 int __mem_cgroup_try_charge_swap(struct folio *folio, swp_entry_t entry)
5466 {
5467 unsigned int nr_pages = folio_nr_pages(folio);
5468 struct page_counter *counter;
5469 struct mem_cgroup *memcg;
5470 struct obj_cgroup *objcg;
5471
5472 if (do_memsw_account())
5473 return 0;
5474
5475 objcg = folio_objcg(folio);
5476 VM_WARN_ON_ONCE_FOLIO(!objcg, folio);
5477 if (!objcg)
5478 return 0;
5479
5480 rcu_read_lock();
5481 memcg = obj_cgroup_memcg(objcg);
5482 if (!entry.val) {
5483 memcg_memory_event(memcg, MEMCG_SWAP_FAIL);
5484 rcu_read_unlock();
5485 return 0;
5486 }
5487
5488 memcg = mem_cgroup_private_id_get_online(memcg, nr_pages);
5489 /* memcg is pinned by the memcg ID. */
5490 rcu_read_unlock();
5491
5492 if (!mem_cgroup_is_root(memcg) &&
5493 !page_counter_try_charge(&memcg->swap, nr_pages, &counter)) {
5494 memcg_memory_event(memcg, MEMCG_SWAP_MAX);
5495 memcg_memory_event(memcg, MEMCG_SWAP_FAIL);
5496 mem_cgroup_private_id_put(memcg, nr_pages);
5497 return -ENOMEM;
5498 }
5499 mod_memcg_state(memcg, MEMCG_SWAP, nr_pages);
5500
5501 swap_cgroup_record(folio, mem_cgroup_private_id(memcg), entry);
5502
5503 return 0;
5504 }
5505
5506 /**
5507 * __mem_cgroup_uncharge_swap - uncharge swap space
5508 * @entry: swap entry to uncharge
5509 * @nr_pages: the amount of swap space to uncharge
5510 */
5511 void __mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages)
5512 {
5513 struct mem_cgroup *memcg;
5514 unsigned short id;
5515
5516 id = swap_cgroup_clear(entry, nr_pages);
5517 rcu_read_lock();
5518 memcg = mem_cgroup_from_private_id(id);
5519 if (memcg) {
5520 if (!mem_cgroup_is_root(memcg)) {
5521 if (do_memsw_account())
5522 page_counter_uncharge(&memcg->memsw, nr_pages);
5523 else
5524 page_counter_uncharge(&memcg->swap, nr_pages);
5525 }
5526 mod_memcg_state(memcg, MEMCG_SWAP, -nr_pages);
5527 mem_cgroup_private_id_put(memcg, nr_pages);
5528 }
5529 rcu_read_unlock();
5530 }
5531
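/*
 * Return the number of swap pages @memcg may still use: the global
 * free swap count, clamped by the tightest swap limit between @memcg
 * and the root.
 */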
5532 long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
5533 {
5534 long nr_swap_pages = get_nr_swap_pages();
5535
5536 if (mem_cgroup_disabled() || do_memsw_account())
5537 return nr_swap_pages;
5538 for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg))
5539 nr_swap_pages = min_t(long, nr_swap_pages,
5540 READ_ONCE(memcg->swap.max) -
5541 page_counter_read(&memcg->swap));
5542 return nr_swap_pages;
5543 }
5544
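/*
 * Returns true if swap should be treated as full for @folio: either
 * swap is globally almost full, or some memcg in @folio's hierarchy
 * has used at least half of its swap.high or swap.max allowance.
 */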
5545 bool mem_cgroup_swap_full(struct folio *folio)
5546 {
5547 struct mem_cgroup *memcg;
5548 bool ret = false;
5549
5550 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
5551
5552 if (vm_swap_full())
5553 return true;
5554 if (do_memsw_account() || !folio_memcg_charged(folio))
5555 return ret;
5556
5557 rcu_read_lock();
5558 memcg = folio_memcg(folio);
5559 for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg)) {
5560 unsigned long usage = page_counter_read(&memcg->swap);
5561
5562 if (usage * 2 >= READ_ONCE(memcg->swap.high) ||
5563 usage * 2 >= READ_ONCE(memcg->swap.max)) {
5564 ret = true;
5565 break;
5566 }
5567 }
5568 rcu_read_unlock();
5569
5570 return ret;
5571 }
5572
5573 static int __init setup_swap_account(char *s)
5574 {
5575 bool res;
5576
5577 if (!kstrtobool(s, &res) && !res)
5578 pr_warn_once("The swapaccount=0 commandline option is deprecated "
5579 "in favor of configuring swap control via cgroupfs. "
5580 "Please report your usecase to linux-mm@kvack.org if you "
5581 "depend on this functionality.\n");
5582 return 1;
5583 }
5584 __setup("swapaccount=", setup_swap_account);
5585
5586 static u64 swap_current_read(struct cgroup_subsys_state *css,
5587 struct cftype *cft)
5588 {
5589 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5590
5591 return (u64)page_counter_read(&memcg->swap) * PAGE_SIZE;
5592 }
5593
5594 static int swap_peak_show(struct seq_file *sf, void *v)
5595 {
5596 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(sf));
5597
5598 return peak_show(sf, v, &memcg->swap);
5599 }
5600
5601 static ssize_t swap_peak_write(struct kernfs_open_file *of, char *buf,
5602 size_t nbytes, loff_t off)
5603 {
5604 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
5605
5606 return peak_write(of, buf, nbytes, off, &memcg->swap,
5607 &memcg->swap_peaks);
5608 }
5609
5610 static int swap_high_show(struct seq_file *m, void *v)
5611 {
5612 return seq_puts_memcg_tunable(m,
5613 READ_ONCE(mem_cgroup_from_seq(m)->swap.high));
5614 }
5615
5616 static ssize_t swap_high_write(struct kernfs_open_file *of,
5617 char *buf, size_t nbytes, loff_t off)
5618 {
5619 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
5620 unsigned long high;
5621 int err;
5622
5623 buf = strstrip(buf);
5624 err = page_counter_memparse(buf, "max", &high);
5625 if (err)
5626 return err;
5627
5628 page_counter_set_high(&memcg->swap, high);
5629
5630 return nbytes;
5631 }
5632
5633 static int swap_max_show(struct seq_file *m, void *v)
5634 {
5635 return seq_puts_memcg_tunable(m,
5636 READ_ONCE(mem_cgroup_from_seq(m)->swap.max));
5637 }
5638
5639 static ssize_t swap_max_write(struct kernfs_open_file *of,
5640 char *buf, size_t nbytes, loff_t off)
5641 {
5642 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
5643 unsigned long max;
5644 int err;
5645
5646 buf = strstrip(buf);
5647 err = page_counter_memparse(buf, "max", &max);
5648 if (err)
5649 return err;
5650
5651 xchg(&memcg->swap.max, max);
5652
5653 return nbytes;
5654 }
5655
5656 static int swap_events_show(struct seq_file *m, void *v)
5657 {
5658 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
5659
5660 seq_printf(m, "high %lu\n",
5661 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_HIGH]));
5662 seq_printf(m, "max %lu\n",
5663 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_MAX]));
5664 seq_printf(m, "fail %lu\n",
5665 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_FAIL]));
5666
5667 return 0;
5668 }
5669
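/*
 * The memory.swap.* interface files on the default (v2) hierarchy.
 * Example usage from inside a cgroup directory (sketch; values take
 * the usual memparse suffixes or "max"):
 *
 *	echo 1G > memory.swap.max
 *	echo 512M > memory.swap.high
 *	cat memory.swap.current
 */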
5670 static struct cftype swap_files[] = {
5671 {
5672 .name = "swap.current",
5673 .flags = CFTYPE_NOT_ON_ROOT,
5674 .read_u64 = swap_current_read,
5675 },
5676 {
5677 .name = "swap.high",
5678 .flags = CFTYPE_NOT_ON_ROOT,
5679 .seq_show = swap_high_show,
5680 .write = swap_high_write,
5681 },
5682 {
5683 .name = "swap.max",
5684 .flags = CFTYPE_NOT_ON_ROOT,
5685 .seq_show = swap_max_show,
5686 .write = swap_max_write,
5687 },
5688 {
5689 .name = "swap.peak",
5690 .flags = CFTYPE_NOT_ON_ROOT,
5691 .open = peak_open,
5692 .release = peak_release,
5693 .seq_show = swap_peak_show,
5694 .write = swap_peak_write,
5695 },
5696 {
5697 .name = "swap.events",
5698 .flags = CFTYPE_NOT_ON_ROOT,
5699 .file_offset = offsetof(struct mem_cgroup, swap_events_file),
5700 .seq_show = swap_events_show,
5701 },
5702 { } /* terminate */
5703 };
5704
5705 #ifdef CONFIG_ZSWAP
5706 /**
5707 * obj_cgroup_may_zswap - check if this cgroup can zswap
5708 * @objcg: the object cgroup
5709 *
5710 * Check if the hierarchical zswap limit has been reached.
5711 *
5712 * This doesn't check for specific headroom, and it is not atomic
5713 * either. But with zswap, the size of the allocation is only known
5714 * once compression has occurred, and this optimistic pre-check avoids
5715 * spending cycles on compression when there is already no room left
5716 * or zswap is disabled altogether somewhere in the hierarchy.
5717 */
5718 bool obj_cgroup_may_zswap(struct obj_cgroup *objcg)
5719 {
5720 struct mem_cgroup *memcg, *original_memcg;
5721 bool ret = true;
5722
5723 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
5724 return true;
5725
5726 original_memcg = get_mem_cgroup_from_objcg(objcg);
5727 for (memcg = original_memcg; !mem_cgroup_is_root(memcg);
5728 memcg = parent_mem_cgroup(memcg)) {
5729 unsigned long max = READ_ONCE(memcg->zswap_max);
5730 unsigned long pages;
5731
5732 if (max == PAGE_COUNTER_MAX)
5733 continue;
5734 if (max == 0) {
5735 ret = false;
5736 break;
5737 }
5738
5739 /* Force flush to get accurate stats for charging */
5740 __mem_cgroup_flush_stats(memcg, true);
5741 pages = memcg_page_state(memcg, MEMCG_ZSWAP_B) / PAGE_SIZE;
5742 if (pages < max)
5743 continue;
5744 ret = false;
5745 break;
5746 }
5747 mem_cgroup_put(original_memcg);
5748 return ret;
5749 }
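/*
 * Typical caller sequence (sketch of the zswap store path):
 *
 *	if (!obj_cgroup_may_zswap(objcg))
 *		reject the page;
 *	compress the page;
 *	obj_cgroup_charge_zswap(objcg, compressed_size);
 */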
5750
5751 /**
5752 * obj_cgroup_charge_zswap - charge compression backend memory
5753 * @objcg: the object cgroup
5754 * @size: size of compressed object
5755 *
5756 * Force the charge to go ahead after obj_cgroup_may_zswap() has
5757 * allowed compression and storage in zswap for this cgroup.
5758 */
5759 void obj_cgroup_charge_zswap(struct obj_cgroup *objcg, size_t size)
5760 {
5761 struct mem_cgroup *memcg;
5762
5763 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
5764 return;
5765
5766 if (obj_cgroup_is_root(objcg))
5767 return;
5768
5769 VM_WARN_ON_ONCE(!(current->flags & PF_MEMALLOC));
5770
5771 /* PF_MEMALLOC context, charging must succeed */
5772 if (obj_cgroup_charge(objcg, GFP_KERNEL, size))
5773 VM_WARN_ON_ONCE(1);
5774
5775 rcu_read_lock();
5776 memcg = obj_cgroup_memcg(objcg);
5777 mod_memcg_state(memcg, MEMCG_ZSWAP_B, size);
5778 mod_memcg_state(memcg, MEMCG_ZSWAPPED, 1);
5779 if (size == PAGE_SIZE)
5780 mod_memcg_state(memcg, MEMCG_ZSWAP_INCOMP, 1);
5781 rcu_read_unlock();
5782 }
5783
5784 /**
5785 * obj_cgroup_uncharge_zswap - uncharge compression backend memory
5786 * @objcg: the object cgroup
5787 * @size: size of compressed object
5788 *
5789 * Uncharges zswap memory when a page is swapped back in.
5790 */
5791 void obj_cgroup_uncharge_zswap(struct obj_cgroup *objcg, size_t size)
5792 {
5793 struct mem_cgroup *memcg;
5794
5795 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
5796 return;
5797
5798 if (obj_cgroup_is_root(objcg))
5799 return;
5800
5801 obj_cgroup_uncharge(objcg, size);
5802
5803 rcu_read_lock();
5804 memcg = obj_cgroup_memcg(objcg);
5805 mod_memcg_state(memcg, MEMCG_ZSWAP_B, -size);
5806 mod_memcg_state(memcg, MEMCG_ZSWAPPED, -1);
5807 if (size == PAGE_SIZE)
5808 mod_memcg_state(memcg, MEMCG_ZSWAP_INCOMP, -1);
5809 rcu_read_unlock();
5810 }
5811
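/*
 * Writeback from zswap to the backing swap device requires
 * memory.zswap.writeback to be enabled in @memcg and all of its
 * ancestors; if zswap itself is disabled, writeback is always allowed.
 */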
5812 bool mem_cgroup_zswap_writeback_enabled(struct mem_cgroup *memcg)
5813 {
5814 /* if zswap is disabled, do not block pages going to the swapping device */
5815 if (!zswap_is_enabled())
5816 return true;
5817
5818 for (; memcg; memcg = parent_mem_cgroup(memcg))
5819 if (!READ_ONCE(memcg->zswap_writeback))
5820 return false;
5821
5822 return true;
5823 }
5824
5825 static u64 zswap_current_read(struct cgroup_subsys_state *css,
5826 struct cftype *cft)
5827 {
5828 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5829
5830 mem_cgroup_flush_stats(memcg);
5831 return memcg_page_state(memcg, MEMCG_ZSWAP_B);
5832 }
5833
5834 static int zswap_max_show(struct seq_file *m, void *v)
5835 {
5836 return seq_puts_memcg_tunable(m,
5837 READ_ONCE(mem_cgroup_from_seq(m)->zswap_max));
5838 }
5839
5840 static ssize_t zswap_max_write(struct kernfs_open_file *of,
5841 char *buf, size_t nbytes, loff_t off)
5842 {
5843 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
5844 unsigned long max;
5845 int err;
5846
5847 buf = strstrip(buf);
5848 err = page_counter_memparse(buf, "max", &max);
5849 if (err)
5850 return err;
5851
5852 xchg(&memcg->zswap_max, max);
5853
5854 return nbytes;
5855 }
5856
5857 static int zswap_writeback_show(struct seq_file *m, void *v)
5858 {
5859 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
5860
5861 seq_printf(m, "%d\n", READ_ONCE(memcg->zswap_writeback));
5862 return 0;
5863 }
5864
5865 static ssize_t zswap_writeback_write(struct kernfs_open_file *of,
5866 char *buf, size_t nbytes, loff_t off)
5867 {
5868 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
5869 int zswap_writeback;
5870 ssize_t parse_ret = kstrtoint(strstrip(buf), 0, &zswap_writeback);
5871
5872 if (parse_ret)
5873 return parse_ret;
5874
5875 if (zswap_writeback != 0 && zswap_writeback != 1)
5876 return -EINVAL;
5877
5878 WRITE_ONCE(memcg->zswap_writeback, zswap_writeback);
5879 return nbytes;
5880 }
5881
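/*
 * The memory.zswap.* interface files. memory.zswap.writeback accepts
 * only 0 or 1, e.g. (sketch):
 *
 *	echo 0 > memory.zswap.writeback
 */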
5882 static struct cftype zswap_files[] = {
5883 {
5884 .name = "zswap.current",
5885 .flags = CFTYPE_NOT_ON_ROOT,
5886 .read_u64 = zswap_current_read,
5887 },
5888 {
5889 .name = "zswap.max",
5890 .flags = CFTYPE_NOT_ON_ROOT,
5891 .seq_show = zswap_max_show,
5892 .write = zswap_max_write,
5893 },
5894 {
5895 .name = "zswap.writeback",
5896 .seq_show = zswap_writeback_show,
5897 .write = zswap_writeback_write,
5898 },
5899 { } /* terminate */
5900 };
5901 #endif /* CONFIG_ZSWAP */
5902
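/* Register the swap, legacy memsw (v1) and zswap interface files. */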
5903 static int __init mem_cgroup_swap_init(void)
5904 {
5905 if (mem_cgroup_disabled())
5906 return 0;
5907
5908 WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys, swap_files));
5909 #ifdef CONFIG_MEMCG_V1
5910 WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys, memsw_files));
5911 #endif
5912 #ifdef CONFIG_ZSWAP
5913 WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys, zswap_files));
5914 #endif
5915 return 0;
5916 }
5917 subsys_initcall(mem_cgroup_swap_init);
5918
5919 #endif /* CONFIG_SWAP */
5920
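/* Restrict @mask to the nodes allowed by the cpuset of @memcg's cgroup. */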
5921 void mem_cgroup_node_filter_allowed(struct mem_cgroup *memcg, nodemask_t *mask)
5922 {
5923 nodemask_t allowed;
5924
5925 if (!memcg)
5926 return;
5927
5928 /*
5929 * Since this interface is intended for use by migration paths, and
5930 * reclaim and migration are subject to race conditions such as changes
5931 * in effective_mems and hot-unplugging of nodes, an inaccurate
5932 * allowed mask is acceptable.
5933 */
5934 cpuset_nodes_allowed(memcg->css.cgroup, &allowed);
5935 nodes_and(*mask, *mask, allowed);
5936 }
5937
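/*
 * Print the amount of memory under min and low protection below
 * @memcg (below the root memcg if @memcg is NULL).
 */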
5938 void mem_cgroup_show_protected_memory(struct mem_cgroup *memcg)
5939 {
5940 if (mem_cgroup_disabled() || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
5941 return;
5942
5943 if (!memcg)
5944 memcg = root_mem_cgroup;
5945
5946 pr_warn("Memory cgroup min protection %lukB -- low protection %lukB",
5947 K(atomic_long_read(&memcg->memory.children_min_usage)),
5948 K(atomic_long_read(&memcg->memory.children_low_usage)));
5949 }
5950