1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /* memcontrol.c - Memory Controller
3 *
4 * Copyright IBM Corporation, 2007
5 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
6 *
7 * Copyright 2007 OpenVZ SWsoft Inc
8 * Author: Pavel Emelianov <xemul@openvz.org>
9 *
10 * Memory thresholds
11 * Copyright (C) 2009 Nokia Corporation
12 * Author: Kirill A. Shutemov
13 *
14 * Kernel Memory Controller
15 * Copyright (C) 2012 Parallels Inc. and Google Inc.
16 * Authors: Glauber Costa and Suleiman Souhlal
17 *
18 * Native page reclaim
19 * Charge lifetime sanitation
20 * Lockless page tracking & accounting
21 * Unified hierarchy configuration model
22 * Copyright (C) 2015 Red Hat, Inc., Johannes Weiner
23 *
24 * Per memcg lru locking
25 * Copyright (C) 2020 Alibaba, Inc, Alex Shi
26 */
27
28 #include <linux/cgroup-defs.h>
29 #include <linux/page_counter.h>
30 #include <linux/memcontrol.h>
31 #include <linux/cgroup.h>
32 #include <linux/sched/mm.h>
33 #include <linux/shmem_fs.h>
34 #include <linux/hugetlb.h>
35 #include <linux/pagemap.h>
36 #include <linux/pagevec.h>
37 #include <linux/vm_event_item.h>
38 #include <linux/smp.h>
39 #include <linux/page-flags.h>
40 #include <linux/backing-dev.h>
41 #include <linux/bit_spinlock.h>
42 #include <linux/rcupdate.h>
43 #include <linux/limits.h>
44 #include <linux/export.h>
45 #include <linux/list.h>
46 #include <linux/mutex.h>
47 #include <linux/rbtree.h>
48 #include <linux/slab.h>
49 #include <linux/swapops.h>
50 #include <linux/spinlock.h>
51 #include <linux/fs.h>
52 #include <linux/seq_file.h>
53 #include <linux/parser.h>
54 #include <linux/vmpressure.h>
55 #include <linux/memremap.h>
56 #include <linux/mm_inline.h>
57 #include <linux/swap_cgroup.h>
58 #include <linux/cpu.h>
59 #include <linux/oom.h>
60 #include <linux/lockdep.h>
61 #include <linux/resume_user_mode.h>
62 #include <linux/psi.h>
63 #include <linux/seq_buf.h>
64 #include <linux/sched/isolation.h>
65 #include <linux/kmemleak.h>
66 #include "internal.h"
67 #include <net/sock.h>
68 #include <net/ip.h>
69 #include "slab.h"
70 #include "memcontrol-v1.h"
71
72 #include <linux/uaccess.h>
73
74 #include <trace/events/vmscan.h>
75
76 struct cgroup_subsys memory_cgrp_subsys __read_mostly;
77 EXPORT_SYMBOL(memory_cgrp_subsys);
78
79 struct mem_cgroup *root_mem_cgroup __read_mostly;
80
81 /* Active memory cgroup to use from an interrupt context */
82 DEFINE_PER_CPU(struct mem_cgroup *, int_active_memcg);
83 EXPORT_PER_CPU_SYMBOL_GPL(int_active_memcg);
84
85 /* Socket memory accounting disabled? */
86 static bool cgroup_memory_nosocket __ro_after_init;
87
88 /* Kernel memory accounting disabled? */
89 static bool cgroup_memory_nokmem __ro_after_init;
90
91 /* BPF memory accounting disabled? */
92 static bool cgroup_memory_nobpf __ro_after_init;
93
94 #ifdef CONFIG_CGROUP_WRITEBACK
95 static DECLARE_WAIT_QUEUE_HEAD(memcg_cgwb_frn_waitq);
96 #endif
97
98 static inline bool task_is_dying(void)
99 {
100 return tsk_is_oom_victim(current) || fatal_signal_pending(current) ||
101 (current->flags & PF_EXITING);
102 }
103
104 /* Some nice accessors for the vmpressure. */
105 struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
106 {
107 if (!memcg)
108 memcg = root_mem_cgroup;
109 return &memcg->vmpressure;
110 }
111
112 struct mem_cgroup *vmpressure_to_memcg(struct vmpressure *vmpr)
113 {
114 return container_of(vmpr, struct mem_cgroup, vmpressure);
115 }
116
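/*
 * The lowest bit of current->objcg is used as a flag: when it is set, the
 * cached objcg pointer is stale (e.g. the task moved to another cgroup) and
 * is lazily refreshed before the next use.
 */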
117 #define CURRENT_OBJCG_UPDATE_BIT 0
118 #define CURRENT_OBJCG_UPDATE_FLAG (1UL << CURRENT_OBJCG_UPDATE_BIT)
119
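/* Protects the objcg lists (memcg->objcg_list) during objcg reparenting. */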
120 static DEFINE_SPINLOCK(objcg_lock);
121
122 bool mem_cgroup_kmem_disabled(void)
123 {
124 return cgroup_memory_nokmem;
125 }
126
127 static void obj_cgroup_uncharge_pages(struct obj_cgroup *objcg,
128 unsigned int nr_pages);
129
130 static void obj_cgroup_release(struct percpu_ref *ref)
131 {
132 struct obj_cgroup *objcg = container_of(ref, struct obj_cgroup, refcnt);
133 unsigned int nr_bytes;
134 unsigned int nr_pages;
135 unsigned long flags;
136
137 /*
138 * At this point all allocated objects are freed, and
139 * objcg->nr_charged_bytes can't have an arbitrary byte value.
140 * However, it can be PAGE_SIZE or (x * PAGE_SIZE).
141 *
142 * The following sequence can lead to it:
143 * 1) CPU0: objcg == stock->cached_objcg
144 * 2) CPU1: we do a small allocation (e.g. 92 bytes),
145 * PAGE_SIZE bytes are charged
146 * 3) CPU1: a process from another memcg is allocating something,
147 * the stock is flushed,
148 * objcg->nr_charged_bytes = PAGE_SIZE - 92
149 * 4) CPU0: we release this object,
150 * 92 bytes are added to stock->nr_bytes
151 * 5) CPU0: the stock is flushed,
152 * 92 bytes are added to objcg->nr_charged_bytes
153 *
154 * As a result, nr_charged_bytes == PAGE_SIZE.
155 * This page will be uncharged in obj_cgroup_release().
156 */
157 nr_bytes = atomic_read(&objcg->nr_charged_bytes);
158 WARN_ON_ONCE(nr_bytes & (PAGE_SIZE - 1));
159 nr_pages = nr_bytes >> PAGE_SHIFT;
160
161 if (nr_pages)
162 obj_cgroup_uncharge_pages(objcg, nr_pages);
163
164 spin_lock_irqsave(&objcg_lock, flags);
165 list_del(&objcg->list);
166 spin_unlock_irqrestore(&objcg_lock, flags);
167
168 percpu_ref_exit(ref);
169 kfree_rcu(objcg, rcu);
170 }
171
172 static struct obj_cgroup *obj_cgroup_alloc(void)
173 {
174 struct obj_cgroup *objcg;
175 int ret;
176
177 objcg = kzalloc(sizeof(struct obj_cgroup), GFP_KERNEL);
178 if (!objcg)
179 return NULL;
180
181 ret = percpu_ref_init(&objcg->refcnt, obj_cgroup_release, 0,
182 GFP_KERNEL);
183 if (ret) {
184 kfree(objcg);
185 return NULL;
186 }
187 INIT_LIST_HEAD(&objcg->list);
188 return objcg;
189 }
190
191 static void memcg_reparent_objcgs(struct mem_cgroup *memcg,
192 struct mem_cgroup *parent)
193 {
194 struct obj_cgroup *objcg, *iter;
195
196 objcg = rcu_replace_pointer(memcg->objcg, NULL, true);
197
198 spin_lock_irq(&objcg_lock);
199
200 /* 1) Ready to reparent active objcg. */
201 list_add(&objcg->list, &memcg->objcg_list);
202 /* 2) Reparent active objcg and already reparented objcgs to parent. */
203 list_for_each_entry(iter, &memcg->objcg_list, list)
204 WRITE_ONCE(iter->memcg, parent);
205 /* 3) Move already reparented objcgs to the parent's list */
206 list_splice(&memcg->objcg_list, &parent->objcg_list);
207
208 spin_unlock_irq(&objcg_lock);
209
210 percpu_ref_kill(&objcg->refcnt);
211 }
212
213 /*
214 * A lot of the calls to the cache allocation functions are expected to be
215 * inlined by the compiler. Since the calls to memcg_slab_post_alloc_hook() are
216 * conditional on this static branch, we have to allow modules that do
217 * kmem_cache_alloc and the like to see this symbol as well.
218 */
219 DEFINE_STATIC_KEY_FALSE(memcg_kmem_online_key);
220 EXPORT_SYMBOL(memcg_kmem_online_key);
221
222 DEFINE_STATIC_KEY_FALSE(memcg_bpf_enabled_key);
223 EXPORT_SYMBOL(memcg_bpf_enabled_key);
224
225 /**
226 * mem_cgroup_css_from_folio - css of the memcg associated with a folio
227 * @folio: folio of interest
228 *
229 * If memcg is bound to the default hierarchy, css of the memcg associated
230 * with @folio is returned. The returned css remains associated with @folio
231 * until it is released.
232 *
233 * If memcg is bound to a traditional hierarchy, the css of root_mem_cgroup
234 * is returned.
235 */
236 struct cgroup_subsys_state *mem_cgroup_css_from_folio(struct folio *folio)
237 {
238 struct mem_cgroup *memcg = folio_memcg(folio);
239
240 if (!memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
241 memcg = root_mem_cgroup;
242
243 return &memcg->css;
244 }
245
246 /**
247 * page_cgroup_ino - return inode number of the memcg a page is charged to
248 * @page: the page
249 *
250 * Look up the closest online ancestor of the memory cgroup @page is charged to
251 * and return its inode number or 0 if @page is not charged to any cgroup. It
252 * is safe to call this function without holding a reference to @page.
253 *
254 * Note, this function is inherently racy, because there is nothing to prevent
255 * the cgroup inode from getting torn down and potentially reallocated a moment
256 * after page_cgroup_ino() returns, so it only should be used by callers that
257 * do not care (such as procfs interfaces).
258 */
259 ino_t page_cgroup_ino(struct page *page)
260 {
261 struct mem_cgroup *memcg;
262 unsigned long ino = 0;
263
264 rcu_read_lock();
265 /* page_folio() is racy here, but the entire function is racy anyway */
266 memcg = folio_memcg_check(page_folio(page));
267
268 while (memcg && !(memcg->css.flags & CSS_ONLINE))
269 memcg = parent_mem_cgroup(memcg);
270 if (memcg)
271 ino = cgroup_ino(memcg->css.cgroup);
272 rcu_read_unlock();
273 return ino;
274 }
275
276 /* Subset of node_stat_item for memcg stats */
277 static const unsigned int memcg_node_stat_items[] = {
278 NR_INACTIVE_ANON,
279 NR_ACTIVE_ANON,
280 NR_INACTIVE_FILE,
281 NR_ACTIVE_FILE,
282 NR_UNEVICTABLE,
283 NR_SLAB_RECLAIMABLE_B,
284 NR_SLAB_UNRECLAIMABLE_B,
285 WORKINGSET_REFAULT_ANON,
286 WORKINGSET_REFAULT_FILE,
287 WORKINGSET_ACTIVATE_ANON,
288 WORKINGSET_ACTIVATE_FILE,
289 WORKINGSET_RESTORE_ANON,
290 WORKINGSET_RESTORE_FILE,
291 WORKINGSET_NODERECLAIM,
292 NR_ANON_MAPPED,
293 NR_FILE_MAPPED,
294 NR_FILE_PAGES,
295 NR_FILE_DIRTY,
296 NR_WRITEBACK,
297 NR_SHMEM,
298 NR_SHMEM_THPS,
299 NR_FILE_THPS,
300 NR_ANON_THPS,
301 NR_KERNEL_STACK_KB,
302 NR_PAGETABLE,
303 NR_SECONDARY_PAGETABLE,
304 #ifdef CONFIG_SWAP
305 NR_SWAPCACHE,
306 #endif
307 #ifdef CONFIG_NUMA_BALANCING
308 PGPROMOTE_SUCCESS,
309 #endif
310 PGDEMOTE_KSWAPD,
311 PGDEMOTE_DIRECT,
312 PGDEMOTE_KHUGEPAGED,
313 };
314
315 static const unsigned int memcg_stat_items[] = {
316 MEMCG_SWAP,
317 MEMCG_SOCK,
318 MEMCG_PERCPU_B,
319 MEMCG_VMALLOC,
320 MEMCG_KMEM,
321 MEMCG_ZSWAP_B,
322 MEMCG_ZSWAPPED,
323 };
324
325 #define NR_MEMCG_NODE_STAT_ITEMS ARRAY_SIZE(memcg_node_stat_items)
326 #define MEMCG_VMSTAT_SIZE (NR_MEMCG_NODE_STAT_ITEMS + \
327 ARRAY_SIZE(memcg_stat_items))
328 #define BAD_STAT_IDX(index) ((u32)(index) >= U8_MAX)
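/*
 * Maps a global stat index (memcg_stat_item or node_stat_item) to its slot
 * in the packed memcg stat arrays; items outside the memcg subset keep the
 * poison value U8_MAX.
 */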
329 static u8 mem_cgroup_stats_index[MEMCG_NR_STAT] __read_mostly;
330
331 static void init_memcg_stats(void)
332 {
333 u8 i, j = 0;
334
335 BUILD_BUG_ON(MEMCG_NR_STAT >= U8_MAX);
336
337 memset(mem_cgroup_stats_index, U8_MAX, sizeof(mem_cgroup_stats_index));
338
339 for (i = 0; i < NR_MEMCG_NODE_STAT_ITEMS; ++i, ++j)
340 mem_cgroup_stats_index[memcg_node_stat_items[i]] = j;
341
342 for (i = 0; i < ARRAY_SIZE(memcg_stat_items); ++i, ++j)
343 mem_cgroup_stats_index[memcg_stat_items[i]] = j;
344 }
345
346 static inline int memcg_stats_index(int idx)
347 {
348 return mem_cgroup_stats_index[idx];
349 }
350
351 struct lruvec_stats_percpu {
352 /* Local (CPU and cgroup) state */
353 long state[NR_MEMCG_NODE_STAT_ITEMS];
354
355 /* Delta calculation for lockless upward propagation */
356 long state_prev[NR_MEMCG_NODE_STAT_ITEMS];
357 };
358
359 struct lruvec_stats {
360 /* Aggregated (CPU and subtree) state */
361 long state[NR_MEMCG_NODE_STAT_ITEMS];
362
363 /* Non-hierarchical (CPU aggregated) state */
364 long state_local[NR_MEMCG_NODE_STAT_ITEMS];
365
366 /* Pending child counts during tree propagation */
367 long state_pending[NR_MEMCG_NODE_STAT_ITEMS];
368 };
369
370 unsigned long lruvec_page_state(struct lruvec *lruvec, enum node_stat_item idx)
371 {
372 struct mem_cgroup_per_node *pn;
373 long x;
374 int i;
375
376 if (mem_cgroup_disabled())
377 return node_page_state(lruvec_pgdat(lruvec), idx);
378
379 i = memcg_stats_index(idx);
380 if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx))
381 return 0;
382
383 pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
384 x = READ_ONCE(pn->lruvec_stats->state[i]);
385 #ifdef CONFIG_SMP
386 if (x < 0)
387 x = 0;
388 #endif
389 return x;
390 }
391
392 unsigned long lruvec_page_state_local(struct lruvec *lruvec,
393 enum node_stat_item idx)
394 {
395 struct mem_cgroup_per_node *pn;
396 long x;
397 int i;
398
399 if (mem_cgroup_disabled())
400 return node_page_state(lruvec_pgdat(lruvec), idx);
401
402 i = memcg_stats_index(idx);
403 if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx))
404 return 0;
405
406 pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
407 x = READ_ONCE(pn->lruvec_stats->state_local[i]);
408 #ifdef CONFIG_SMP
409 if (x < 0)
410 x = 0;
411 #endif
412 return x;
413 }
414
415 /* Subset of vm_event_item to report for memcg event stats */
416 static const unsigned int memcg_vm_event_stat[] = {
417 #ifdef CONFIG_MEMCG_V1
418 PGPGIN,
419 PGPGOUT,
420 #endif
421 PGSCAN_KSWAPD,
422 PGSCAN_DIRECT,
423 PGSCAN_KHUGEPAGED,
424 PGSTEAL_KSWAPD,
425 PGSTEAL_DIRECT,
426 PGSTEAL_KHUGEPAGED,
427 PGFAULT,
428 PGMAJFAULT,
429 PGREFILL,
430 PGACTIVATE,
431 PGDEACTIVATE,
432 PGLAZYFREE,
433 PGLAZYFREED,
434 #ifdef CONFIG_ZSWAP
435 ZSWPIN,
436 ZSWPOUT,
437 ZSWPWB,
438 #endif
439 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
440 THP_FAULT_ALLOC,
441 THP_COLLAPSE_ALLOC,
442 THP_SWPOUT,
443 THP_SWPOUT_FALLBACK,
444 #endif
445 #ifdef CONFIG_NUMA_BALANCING
446 NUMA_PAGE_MIGRATE,
447 NUMA_PTE_UPDATES,
448 NUMA_HINT_FAULTS,
449 #endif
450 };
451
452 #define NR_MEMCG_EVENTS ARRAY_SIZE(memcg_vm_event_stat)
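/*
 * Maps a vm_event_item to its slot in the packed memcg event arrays; events
 * not tracked per-memcg keep the poison value U8_MAX.
 */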
453 static u8 mem_cgroup_events_index[NR_VM_EVENT_ITEMS] __read_mostly;
454
455 static void init_memcg_events(void)
456 {
457 u8 i;
458
459 BUILD_BUG_ON(NR_VM_EVENT_ITEMS >= U8_MAX);
460
461 memset(mem_cgroup_events_index, U8_MAX,
462 sizeof(mem_cgroup_events_index));
463
464 for (i = 0; i < NR_MEMCG_EVENTS; ++i)
465 mem_cgroup_events_index[memcg_vm_event_stat[i]] = i;
466 }
467
468 static inline int memcg_events_index(enum vm_event_item idx)
469 {
470 return mem_cgroup_events_index[idx];
471 }
472
473 struct memcg_vmstats_percpu {
474 /* Stats updates since the last flush */
475 unsigned int stats_updates;
476
477 /* Cached pointers for fast iteration in memcg_rstat_updated() */
478 struct memcg_vmstats_percpu *parent;
479 struct memcg_vmstats *vmstats;
480
481 /* The above should fit a single cacheline for memcg_rstat_updated() */
482
483 /* Local (CPU and cgroup) page state & events */
484 long state[MEMCG_VMSTAT_SIZE];
485 unsigned long events[NR_MEMCG_EVENTS];
486
487 /* Delta calculation for lockless upward propagation */
488 long state_prev[MEMCG_VMSTAT_SIZE];
489 unsigned long events_prev[NR_MEMCG_EVENTS];
490 } ____cacheline_aligned;
491
492 struct memcg_vmstats {
493 /* Aggregated (CPU and subtree) page state & events */
494 long state[MEMCG_VMSTAT_SIZE];
495 unsigned long events[NR_MEMCG_EVENTS];
496
497 /* Non-hierarchical (CPU aggregated) page state & events */
498 long state_local[MEMCG_VMSTAT_SIZE];
499 unsigned long events_local[NR_MEMCG_EVENTS];
500
501 /* Pending child counts during tree propagation */
502 long state_pending[MEMCG_VMSTAT_SIZE];
503 unsigned long events_pending[NR_MEMCG_EVENTS];
504
505 /* Stats updates since the last flush */
506 atomic64_t stats_updates;
507 };
508
509 /*
510 * memcg and lruvec stats flushing
511 *
512 * Many codepaths leading to stats update or read are performance sensitive and
513 * adding stats flushing in such codepaths is not desirable. To optimize
514 * flushing, the kernel does two things:
515 *
516 * 1) Periodically and asynchronously flush the stats every 2 seconds so that
517 * the rstat update tree does not grow unbounded.
518 *
519 * 2) Flush the stats synchronously on the reader side only when there are
520 * more than (MEMCG_CHARGE_BATCH * nr_cpus) update events. This can leave the
521 * stats out of sync by at most (MEMCG_CHARGE_BATCH * nr_cpus) updates, but
522 * only for 2 seconds due to (1).
523 */
524 static void flush_memcg_stats_dwork(struct work_struct *w);
525 static DECLARE_DEFERRABLE_WORK(stats_flush_dwork, flush_memcg_stats_dwork);
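/* jiffies_64 timestamp of the most recent flush of the root memcg's stats */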
526 static u64 flush_last_time;
527
528 #define FLUSH_TIME (2UL*HZ)
529
530 /*
531 * Accessors to ensure that preemption is disabled: on PREEMPT_RT an acquired
532 * spinlock_t does not disable preemption, so it cannot be relied on for this.
533 * These functions are never used in hardirq context on PREEMPT_RT and
534 * therefore disabling preemption is sufficient.
535 */
536 static void memcg_stats_lock(void)
537 {
538 preempt_disable_nested();
539 VM_WARN_ON_IRQS_ENABLED();
540 }
541
542 static void __memcg_stats_lock(void)
543 {
544 preempt_disable_nested();
545 }
546
547 static void memcg_stats_unlock(void)
548 {
549 preempt_enable_nested();
550 }
551
552
553 static bool memcg_vmstats_needs_flush(struct memcg_vmstats *vmstats)
554 {
555 return atomic64_read(&vmstats->stats_updates) >
556 MEMCG_CHARGE_BATCH * num_online_cpus();
557 }
558
559 static inline void memcg_rstat_updated(struct mem_cgroup *memcg, int val)
560 {
561 struct memcg_vmstats_percpu *statc;
562 int cpu = smp_processor_id();
563 unsigned int stats_updates;
564
565 if (!val)
566 return;
567
568 cgroup_rstat_updated(memcg->css.cgroup, cpu);
569 statc = this_cpu_ptr(memcg->vmstats_percpu);
570 for (; statc; statc = statc->parent) {
571 stats_updates = READ_ONCE(statc->stats_updates) + abs(val);
572 WRITE_ONCE(statc->stats_updates, stats_updates);
573 if (stats_updates < MEMCG_CHARGE_BATCH)
574 continue;
575
576 /*
577 * If @memcg is already flush-able, increasing stats_updates is
578 * redundant. Avoid the overhead of the atomic update.
579 */
580 if (!memcg_vmstats_needs_flush(statc->vmstats))
581 atomic64_add(stats_updates,
582 &statc->vmstats->stats_updates);
583 WRITE_ONCE(statc->stats_updates, 0);
584 }
585 }
586
587 static void do_flush_stats(struct mem_cgroup *memcg)
588 {
589 if (mem_cgroup_is_root(memcg))
590 WRITE_ONCE(flush_last_time, jiffies_64);
591
592 cgroup_rstat_flush(memcg->css.cgroup);
593 }
594
595 /*
596 * mem_cgroup_flush_stats - flush the stats of a memory cgroup subtree
597 * @memcg: root of the subtree to flush
598 *
599 * Flushing is serialized by the underlying global rstat lock. There is also a
600 * minimum amount of work to be done even if there are no stat updates to flush.
601 * Hence, we only flush the stats if the updates delta exceeds a threshold. This
602 * avoids unnecessary work and contention on the underlying lock.
603 */
604 void mem_cgroup_flush_stats(struct mem_cgroup *memcg)
605 {
606 if (mem_cgroup_disabled())
607 return;
608
609 if (!memcg)
610 memcg = root_mem_cgroup;
611
612 if (memcg_vmstats_needs_flush(memcg->vmstats))
613 do_flush_stats(memcg);
614 }
615
616 void mem_cgroup_flush_stats_ratelimited(struct mem_cgroup *memcg)
617 {
618 /* Only flush if the periodic flusher is one full cycle late */
619 if (time_after64(jiffies_64, READ_ONCE(flush_last_time) + 2*FLUSH_TIME))
620 mem_cgroup_flush_stats(memcg);
621 }
622
623 static void flush_memcg_stats_dwork(struct work_struct *w)
624 {
625 /*
626 * Deliberately ignore memcg_vmstats_needs_flush() here so that flushing
627 * in latency-sensitive paths is as cheap as possible.
628 */
629 do_flush_stats(root_mem_cgroup);
630 queue_delayed_work(system_unbound_wq, &stats_flush_dwork, FLUSH_TIME);
631 }
632
633 unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
634 {
635 long x;
636 int i = memcg_stats_index(idx);
637
638 if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx))
639 return 0;
640
641 x = READ_ONCE(memcg->vmstats->state[i]);
642 #ifdef CONFIG_SMP
643 if (x < 0)
644 x = 0;
645 #endif
646 return x;
647 }
648
649 static int memcg_page_state_unit(int item);
650
651 /*
652 * Normalize the value passed into memcg_rstat_updated() to be in pages. Round
653 * up non-zero sub-page updates to 1 page as zero page updates are ignored.
654 */
655 static int memcg_state_val_in_pages(int idx, int val)
656 {
657 int unit = memcg_page_state_unit(idx);
658
659 if (!val || unit == PAGE_SIZE)
660 return val;
661 else
662 return max(val * unit / PAGE_SIZE, 1UL);
663 }
664
665 /**
666 * __mod_memcg_state - update cgroup memory statistics
667 * @memcg: the memory cgroup
668 * @idx: the stat item - can be enum memcg_stat_item or enum node_stat_item
669 * @val: delta to add to the counter, can be negative
670 */
671 void __mod_memcg_state(struct mem_cgroup *memcg, enum memcg_stat_item idx,
672 int val)
673 {
674 int i = memcg_stats_index(idx);
675
676 if (mem_cgroup_disabled())
677 return;
678
679 if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx))
680 return;
681
682 __this_cpu_add(memcg->vmstats_percpu->state[i], val);
683 memcg_rstat_updated(memcg, memcg_state_val_in_pages(idx, val));
684 }
685
686 /* idx can be of type enum memcg_stat_item or node_stat_item. */
687 unsigned long memcg_page_state_local(struct mem_cgroup *memcg, int idx)
688 {
689 long x;
690 int i = memcg_stats_index(idx);
691
692 if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx))
693 return 0;
694
695 x = READ_ONCE(memcg->vmstats->state_local[i]);
696 #ifdef CONFIG_SMP
697 if (x < 0)
698 x = 0;
699 #endif
700 return x;
701 }
702
703 static void __mod_memcg_lruvec_state(struct lruvec *lruvec,
704 enum node_stat_item idx,
705 int val)
706 {
707 struct mem_cgroup_per_node *pn;
708 struct mem_cgroup *memcg;
709 int i = memcg_stats_index(idx);
710
711 if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx))
712 return;
713
714 pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
715 memcg = pn->memcg;
716
717 /*
718 * The callers from rmap rely on disabled preemption because they never
719 * update their counters from interrupt context. For these
720 * counters we check that the update is never performed from an
721 * interrupt context, while other callers need to have interrupts disabled.
722 */
723 __memcg_stats_lock();
724 if (IS_ENABLED(CONFIG_DEBUG_VM)) {
725 switch (idx) {
726 case NR_ANON_MAPPED:
727 case NR_FILE_MAPPED:
728 case NR_ANON_THPS:
729 WARN_ON_ONCE(!in_task());
730 break;
731 default:
732 VM_WARN_ON_IRQS_ENABLED();
733 }
734 }
735
736 /* Update memcg */
737 __this_cpu_add(memcg->vmstats_percpu->state[i], val);
738
739 /* Update lruvec */
740 __this_cpu_add(pn->lruvec_stats_percpu->state[i], val);
741
742 memcg_rstat_updated(memcg, memcg_state_val_in_pages(idx, val));
743 memcg_stats_unlock();
744 }
745
746 /**
747 * __mod_lruvec_state - update lruvec memory statistics
748 * @lruvec: the lruvec
749 * @idx: the stat item
750 * @val: delta to add to the counter, can be negative
751 *
752 * The lruvec is the intersection of the NUMA node and a cgroup. This
753 * function updates all three counters that are affected by a
754 * change of state at this level: per-node, per-cgroup, per-lruvec.
755 */
756 void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
757 int val)
758 {
759 /* Update node */
760 __mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
761
762 /* Update memcg and lruvec */
763 if (!mem_cgroup_disabled())
764 __mod_memcg_lruvec_state(lruvec, idx, val);
765 }
766
767 void __lruvec_stat_mod_folio(struct folio *folio, enum node_stat_item idx,
768 int val)
769 {
770 struct mem_cgroup *memcg;
771 pg_data_t *pgdat = folio_pgdat(folio);
772 struct lruvec *lruvec;
773
774 rcu_read_lock();
775 memcg = folio_memcg(folio);
776 /* Untracked pages have no memcg, no lruvec. Update only the node */
777 if (!memcg) {
778 rcu_read_unlock();
779 __mod_node_page_state(pgdat, idx, val);
780 return;
781 }
782
783 lruvec = mem_cgroup_lruvec(memcg, pgdat);
784 __mod_lruvec_state(lruvec, idx, val);
785 rcu_read_unlock();
786 }
787 EXPORT_SYMBOL(__lruvec_stat_mod_folio);
788
789 void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val)
790 {
791 pg_data_t *pgdat = page_pgdat(virt_to_page(p));
792 struct mem_cgroup *memcg;
793 struct lruvec *lruvec;
794
795 rcu_read_lock();
796 memcg = mem_cgroup_from_slab_obj(p);
797
798 /*
799 * Untracked pages have no memcg and no lruvec, so update only the
800 * node. If slab objects have been reparented to the root memcg,
801 * then when we free such an object we still need to update the
802 * per-memcg vmstats to keep them correct for the root memcg.
803 */
804 if (!memcg) {
805 __mod_node_page_state(pgdat, idx, val);
806 } else {
807 lruvec = mem_cgroup_lruvec(memcg, pgdat);
808 __mod_lruvec_state(lruvec, idx, val);
809 }
810 rcu_read_unlock();
811 }
812
813 /**
814 * __count_memcg_events - account VM events in a cgroup
815 * @memcg: the memory cgroup
816 * @idx: the event item
817 * @count: the number of events that occurred
818 */
819 void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
820 unsigned long count)
821 {
822 int i = memcg_events_index(idx);
823
824 if (mem_cgroup_disabled())
825 return;
826
827 if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx))
828 return;
829
830 memcg_stats_lock();
831 __this_cpu_add(memcg->vmstats_percpu->events[i], count);
832 memcg_rstat_updated(memcg, count);
833 memcg_stats_unlock();
834 }
835
836 unsigned long memcg_events(struct mem_cgroup *memcg, int event)
837 {
838 int i = memcg_events_index(event);
839
840 if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, event))
841 return 0;
842
843 return READ_ONCE(memcg->vmstats->events[i]);
844 }
845
846 unsigned long memcg_events_local(struct mem_cgroup *memcg, int event)
847 {
848 int i = memcg_events_index(event);
849
850 if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, event))
851 return 0;
852
853 return READ_ONCE(memcg->vmstats->events_local[i]);
854 }
855
856 struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
857 {
858 /*
859 * mm_update_next_owner() may clear mm->owner to NULL
860 * if it races with swapoff, page migration, etc.
861 * So this can be called with p == NULL.
862 */
863 if (unlikely(!p))
864 return NULL;
865
866 return mem_cgroup_from_css(task_css(p, memory_cgrp_id));
867 }
868 EXPORT_SYMBOL(mem_cgroup_from_task);
869
870 static __always_inline struct mem_cgroup *active_memcg(void)
871 {
872 if (!in_task())
873 return this_cpu_read(int_active_memcg);
874 else
875 return current->active_memcg;
876 }
877
878 /**
879 * get_mem_cgroup_from_mm: Obtain a reference on given mm_struct's memcg.
880 * @mm: mm from which memcg should be extracted. It can be NULL.
881 *
882 * Obtain a reference on mm->memcg and return it if successful. If mm
883 * is NULL, then the memcg is chosen as follows:
884 * 1) The active memcg, if set.
885 * 2) current->mm->memcg, if available
886 * 3) root memcg
887 * If mem_cgroup is disabled, NULL is returned.
888 */
889 struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
890 {
891 struct mem_cgroup *memcg;
892
893 if (mem_cgroup_disabled())
894 return NULL;
895
896 /*
897 * Page cache insertions can happen without an
898 * actual mm context, e.g. during disk probing
899 * on boot, loopback IO, acct() writes etc.
900 *
901 * No need to css_get on root memcg as the reference
902 * counting is disabled on the root level in the
903 * cgroup core. See CSS_NO_REF.
904 */
905 if (unlikely(!mm)) {
906 memcg = active_memcg();
907 if (unlikely(memcg)) {
908 /* remote memcg must hold a ref */
909 css_get(&memcg->css);
910 return memcg;
911 }
912 mm = current->mm;
913 if (unlikely(!mm))
914 return root_mem_cgroup;
915 }
916
917 rcu_read_lock();
918 do {
919 memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
920 if (unlikely(!memcg))
921 memcg = root_mem_cgroup;
922 } while (!css_tryget(&memcg->css));
923 rcu_read_unlock();
924 return memcg;
925 }
926 EXPORT_SYMBOL(get_mem_cgroup_from_mm);
927
928 /**
929 * get_mem_cgroup_from_current - Obtain a reference on current task's memcg.
930 */
931 struct mem_cgroup *get_mem_cgroup_from_current(void)
932 {
933 struct mem_cgroup *memcg;
934
935 if (mem_cgroup_disabled())
936 return NULL;
937
938 again:
939 rcu_read_lock();
940 memcg = mem_cgroup_from_task(current);
941 if (!css_tryget(&memcg->css)) {
942 rcu_read_unlock();
943 goto again;
944 }
945 rcu_read_unlock();
946 return memcg;
947 }
948
949 /**
950 * get_mem_cgroup_from_folio - Obtain a reference on a given folio's memcg.
951 * @folio: folio from which memcg should be extracted.
952 */
953 struct mem_cgroup *get_mem_cgroup_from_folio(struct folio *folio)
954 {
955 struct mem_cgroup *memcg = folio_memcg(folio);
956
957 if (mem_cgroup_disabled())
958 return NULL;
959
960 rcu_read_lock();
961 if (!memcg || WARN_ON_ONCE(!css_tryget(&memcg->css)))
962 memcg = root_mem_cgroup;
963 rcu_read_unlock();
964 return memcg;
965 }
966
967 /**
968 * mem_cgroup_iter - iterate over memory cgroup hierarchy
969 * @root: hierarchy root
970 * @prev: previously returned memcg, NULL on first invocation
971 * @reclaim: cookie for shared reclaim walks, NULL for full walks
972 *
973 * Returns references to children of the hierarchy below @root, or
974 * @root itself, or %NULL after a full round-trip.
975 *
976 * Caller must pass the return value in @prev on subsequent
977 * invocations for reference counting, or use mem_cgroup_iter_break()
978 * to cancel a hierarchy walk before the round-trip is complete.
979 *
980 * Reclaimers can specify a node in @reclaim to divide up the memcgs
981 * in the hierarchy among all concurrent reclaimers operating on the
982 * same node.
983 */
984 struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
985 struct mem_cgroup *prev,
986 struct mem_cgroup_reclaim_cookie *reclaim)
987 {
988 struct mem_cgroup_reclaim_iter *iter;
989 struct cgroup_subsys_state *css;
990 struct mem_cgroup *pos;
991 struct mem_cgroup *next;
992
993 if (mem_cgroup_disabled())
994 return NULL;
995
996 if (!root)
997 root = root_mem_cgroup;
998
999 rcu_read_lock();
1000 restart:
1001 next = NULL;
1002
1003 if (reclaim) {
1004 int gen;
1005 int nid = reclaim->pgdat->node_id;
1006
1007 iter = &root->nodeinfo[nid]->iter;
1008 gen = atomic_read(&iter->generation);
1009
1010 /*
1011 * On start, join the current reclaim iteration cycle.
1012 * Exit when a concurrent walker completes it.
1013 */
1014 if (!prev)
1015 reclaim->generation = gen;
1016 else if (reclaim->generation != gen)
1017 goto out_unlock;
1018
1019 pos = READ_ONCE(iter->position);
1020 } else
1021 pos = prev;
1022
1023 css = pos ? &pos->css : NULL;
1024
1025 while ((css = css_next_descendant_pre(css, &root->css))) {
1026 /*
1027 * Verify the css and acquire a reference. The root
1028 * is provided by the caller, so we know it's alive
1029 * and kicking, and don't take an extra reference.
1030 */
1031 if (css == &root->css || css_tryget(css))
1032 break;
1033 }
1034
1035 next = mem_cgroup_from_css(css);
1036
1037 if (reclaim) {
1038 /*
1039 * The position could have already been updated by a competing
1040 * thread, so check that the value hasn't changed since we read
1041 * it to avoid reclaiming from the same cgroup twice.
1042 */
1043 if (cmpxchg(&iter->position, pos, next) != pos) {
1044 if (css && css != &root->css)
1045 css_put(css);
1046 goto restart;
1047 }
1048
1049 if (!next) {
1050 atomic_inc(&iter->generation);
1051
1052 /*
1053 * Reclaimers share the hierarchy walk, and a
1054 * new one might jump in right at the end of
1055 * the hierarchy - make sure they see at least
1056 * one group and restart from the beginning.
1057 */
1058 if (!prev)
1059 goto restart;
1060 }
1061 }
1062
1063 out_unlock:
1064 rcu_read_unlock();
1065 if (prev && prev != root)
1066 css_put(&prev->css);
1067
1068 return next;
1069 }
1070
1071 /**
1072 * mem_cgroup_iter_break - abort a hierarchy walk prematurely
1073 * @root: hierarchy root
1074 * @prev: last visited hierarchy member as returned by mem_cgroup_iter()
1075 */
1076 void mem_cgroup_iter_break(struct mem_cgroup *root,
1077 struct mem_cgroup *prev)
1078 {
1079 if (!root)
1080 root = root_mem_cgroup;
1081 if (prev && prev != root)
1082 css_put(&prev->css);
1083 }
1084
1085 static void __invalidate_reclaim_iterators(struct mem_cgroup *from,
1086 struct mem_cgroup *dead_memcg)
1087 {
1088 struct mem_cgroup_reclaim_iter *iter;
1089 struct mem_cgroup_per_node *mz;
1090 int nid;
1091
1092 for_each_node(nid) {
1093 mz = from->nodeinfo[nid];
1094 iter = &mz->iter;
1095 cmpxchg(&iter->position, dead_memcg, NULL);
1096 }
1097 }
1098
1099 static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
1100 {
1101 struct mem_cgroup *memcg = dead_memcg;
1102 struct mem_cgroup *last;
1103
1104 do {
1105 __invalidate_reclaim_iterators(memcg, dead_memcg);
1106 last = memcg;
1107 } while ((memcg = parent_mem_cgroup(memcg)));
1108
1109 /*
1110 * When cgroup1 non-hierarchy mode is used,
1111 * parent_mem_cgroup() does not walk all the way up to the
1112 * cgroup root (root_mem_cgroup). So we have to handle
1113 * dead_memcg from cgroup root separately.
1114 */
1115 if (!mem_cgroup_is_root(last))
1116 __invalidate_reclaim_iterators(root_mem_cgroup,
1117 dead_memcg);
1118 }
1119
1120 /**
1121 * mem_cgroup_scan_tasks - iterate over tasks of a memory cgroup hierarchy
1122 * @memcg: hierarchy root
1123 * @fn: function to call for each task
1124 * @arg: argument passed to @fn
1125 *
1126 * This function iterates over tasks attached to @memcg or to any of its
1127 * descendants and calls @fn for each task. If @fn returns a non-zero
1128 * value, the function breaks the iteration loop; otherwise it iterates
1129 * over all tasks.
1130 *
1131 * This function must not be called for the root memory cgroup.
1132 */
1133 void mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
1134 int (*fn)(struct task_struct *, void *), void *arg)
1135 {
1136 struct mem_cgroup *iter;
1137 int ret = 0;
1138
1139 BUG_ON(mem_cgroup_is_root(memcg));
1140
1141 for_each_mem_cgroup_tree(iter, memcg) {
1142 struct css_task_iter it;
1143 struct task_struct *task;
1144
1145 css_task_iter_start(&iter->css, CSS_TASK_ITER_PROCS, &it);
1146 while (!ret && (task = css_task_iter_next(&it)))
1147 ret = fn(task, arg);
1148 css_task_iter_end(&it);
1149 if (ret) {
1150 mem_cgroup_iter_break(memcg, iter);
1151 break;
1152 }
1153 }
1154 }
1155
1156 #ifdef CONFIG_DEBUG_VM
1157 void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio)
1158 {
1159 struct mem_cgroup *memcg;
1160
1161 if (mem_cgroup_disabled())
1162 return;
1163
1164 memcg = folio_memcg(folio);
1165
1166 if (!memcg)
1167 VM_BUG_ON_FOLIO(!mem_cgroup_is_root(lruvec_memcg(lruvec)), folio);
1168 else
1169 VM_BUG_ON_FOLIO(lruvec_memcg(lruvec) != memcg, folio);
1170 }
1171 #endif
1172
1173 /**
1174 * folio_lruvec_lock - Lock the lruvec for a folio.
1175 * @folio: Pointer to the folio.
1176 *
1177 * These functions are safe to use under any of the following conditions:
1178 * - folio locked
1179 * - folio_test_lru false
1180 * - folio_memcg_lock()
1181 * - folio frozen (refcount of 0)
1182 *
1183 * Return: The lruvec this folio is on with its lock held.
1184 */
1185 struct lruvec *folio_lruvec_lock(struct folio *folio)
1186 {
1187 struct lruvec *lruvec = folio_lruvec(folio);
1188
1189 spin_lock(&lruvec->lru_lock);
1190 lruvec_memcg_debug(lruvec, folio);
1191
1192 return lruvec;
1193 }
1194
1195 /**
1196 * folio_lruvec_lock_irq - Lock the lruvec for a folio.
1197 * @folio: Pointer to the folio.
1198 *
1199 * These functions are safe to use under any of the following conditions:
1200 * - folio locked
1201 * - folio_test_lru false
1202 * - folio_memcg_lock()
1203 * - folio frozen (refcount of 0)
1204 *
1205 * Return: The lruvec this folio is on with its lock held and interrupts
1206 * disabled.
1207 */
1208 struct lruvec *folio_lruvec_lock_irq(struct folio *folio)
1209 {
1210 struct lruvec *lruvec = folio_lruvec(folio);
1211
1212 spin_lock_irq(&lruvec->lru_lock);
1213 lruvec_memcg_debug(lruvec, folio);
1214
1215 return lruvec;
1216 }
1217
1218 /**
1219 * folio_lruvec_lock_irqsave - Lock the lruvec for a folio.
1220 * @folio: Pointer to the folio.
1221 * @flags: Pointer to irqsave flags.
1222 *
1223 * These functions are safe to use under any of the following conditions:
1224 * - folio locked
1225 * - folio_test_lru false
1226 * - folio_memcg_lock()
1227 * - folio frozen (refcount of 0)
1228 *
1229 * Return: The lruvec this folio is on with its lock held and interrupts
1230 * disabled.
1231 */
1232 struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio,
1233 unsigned long *flags)
1234 {
1235 struct lruvec *lruvec = folio_lruvec(folio);
1236
1237 spin_lock_irqsave(&lruvec->lru_lock, *flags);
1238 lruvec_memcg_debug(lruvec, folio);
1239
1240 return lruvec;
1241 }
1242
1243 /**
1244 * mem_cgroup_update_lru_size - account for adding or removing an lru page
1245 * @lruvec: mem_cgroup per zone lru vector
1246 * @lru: index of lru list the page is sitting on
1247 * @zid: zone id of the accounted pages
1248 * @nr_pages: positive when adding or negative when removing
1249 *
1250 * This function must be called under lru_lock, just before a page is added
1251 * to or just after a page is removed from an lru list.
1252 */
1253 void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
1254 int zid, int nr_pages)
1255 {
1256 struct mem_cgroup_per_node *mz;
1257 unsigned long *lru_size;
1258 long size;
1259
1260 if (mem_cgroup_disabled())
1261 return;
1262
1263 mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
1264 lru_size = &mz->lru_zone_size[zid][lru];
1265
1266 if (nr_pages < 0)
1267 *lru_size += nr_pages;
1268
1269 size = *lru_size;
1270 if (WARN_ONCE(size < 0,
1271 "%s(%p, %d, %d): lru_size %ld\n",
1272 __func__, lruvec, lru, nr_pages, size)) {
1273 VM_BUG_ON(1);
1274 *lru_size = 0;
1275 }
1276
1277 if (nr_pages > 0)
1278 *lru_size += nr_pages;
1279 }
1280
1281 /**
1282 * mem_cgroup_margin - calculate chargeable space of a memory cgroup
1283 * @memcg: the memory cgroup
1284 *
1285 * Returns the maximum amount of memory @memcg can be charged with, in
1286 * pages.
1287 */
1288 static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
1289 {
1290 unsigned long margin = 0;
1291 unsigned long count;
1292 unsigned long limit;
1293
1294 count = page_counter_read(&memcg->memory);
1295 limit = READ_ONCE(memcg->memory.max);
1296 if (count < limit)
1297 margin = limit - count;
1298
1299 if (do_memsw_account()) {
1300 count = page_counter_read(&memcg->memsw);
1301 limit = READ_ONCE(memcg->memsw.max);
1302 if (count < limit)
1303 margin = min(margin, limit - count);
1304 else
1305 margin = 0;
1306 }
1307
1308 return margin;
1309 }
1310
1311 struct memory_stat {
1312 const char *name;
1313 unsigned int idx;
1314 };
1315
1316 static const struct memory_stat memory_stats[] = {
1317 { "anon", NR_ANON_MAPPED },
1318 { "file", NR_FILE_PAGES },
1319 { "kernel", MEMCG_KMEM },
1320 { "kernel_stack", NR_KERNEL_STACK_KB },
1321 { "pagetables", NR_PAGETABLE },
1322 { "sec_pagetables", NR_SECONDARY_PAGETABLE },
1323 { "percpu", MEMCG_PERCPU_B },
1324 { "sock", MEMCG_SOCK },
1325 { "vmalloc", MEMCG_VMALLOC },
1326 { "shmem", NR_SHMEM },
1327 #ifdef CONFIG_ZSWAP
1328 { "zswap", MEMCG_ZSWAP_B },
1329 { "zswapped", MEMCG_ZSWAPPED },
1330 #endif
1331 { "file_mapped", NR_FILE_MAPPED },
1332 { "file_dirty", NR_FILE_DIRTY },
1333 { "file_writeback", NR_WRITEBACK },
1334 #ifdef CONFIG_SWAP
1335 { "swapcached", NR_SWAPCACHE },
1336 #endif
1337 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1338 { "anon_thp", NR_ANON_THPS },
1339 { "file_thp", NR_FILE_THPS },
1340 { "shmem_thp", NR_SHMEM_THPS },
1341 #endif
1342 { "inactive_anon", NR_INACTIVE_ANON },
1343 { "active_anon", NR_ACTIVE_ANON },
1344 { "inactive_file", NR_INACTIVE_FILE },
1345 { "active_file", NR_ACTIVE_FILE },
1346 { "unevictable", NR_UNEVICTABLE },
1347 { "slab_reclaimable", NR_SLAB_RECLAIMABLE_B },
1348 { "slab_unreclaimable", NR_SLAB_UNRECLAIMABLE_B },
1349
1350 /* The memory events */
1351 { "workingset_refault_anon", WORKINGSET_REFAULT_ANON },
1352 { "workingset_refault_file", WORKINGSET_REFAULT_FILE },
1353 { "workingset_activate_anon", WORKINGSET_ACTIVATE_ANON },
1354 { "workingset_activate_file", WORKINGSET_ACTIVATE_FILE },
1355 { "workingset_restore_anon", WORKINGSET_RESTORE_ANON },
1356 { "workingset_restore_file", WORKINGSET_RESTORE_FILE },
1357 { "workingset_nodereclaim", WORKINGSET_NODERECLAIM },
1358
1359 { "pgdemote_kswapd", PGDEMOTE_KSWAPD },
1360 { "pgdemote_direct", PGDEMOTE_DIRECT },
1361 { "pgdemote_khugepaged", PGDEMOTE_KHUGEPAGED },
1362 #ifdef CONFIG_NUMA_BALANCING
1363 { "pgpromote_success", PGPROMOTE_SUCCESS },
1364 #endif
1365 };
1366
1367 /* The actual unit of the state item, not the same as the output unit */
1368 static int memcg_page_state_unit(int item)
1369 {
1370 switch (item) {
1371 case MEMCG_PERCPU_B:
1372 case MEMCG_ZSWAP_B:
1373 case NR_SLAB_RECLAIMABLE_B:
1374 case NR_SLAB_UNRECLAIMABLE_B:
1375 return 1;
1376 case NR_KERNEL_STACK_KB:
1377 return SZ_1K;
1378 default:
1379 return PAGE_SIZE;
1380 }
1381 }
1382
1383 /* Translate stat items to the correct unit for memory.stat output */
1384 static int memcg_page_state_output_unit(int item)
1385 {
1386 /*
1387 * Workingset state is actually in pages, but we export it to userspace
1388 * as a scalar count of events, so special case it here.
1389 *
1390 * Demotion and promotion activities are exported in pages, consistent
1391 * with their global counterparts.
1392 */
1393 switch (item) {
1394 case WORKINGSET_REFAULT_ANON:
1395 case WORKINGSET_REFAULT_FILE:
1396 case WORKINGSET_ACTIVATE_ANON:
1397 case WORKINGSET_ACTIVATE_FILE:
1398 case WORKINGSET_RESTORE_ANON:
1399 case WORKINGSET_RESTORE_FILE:
1400 case WORKINGSET_NODERECLAIM:
1401 case PGDEMOTE_KSWAPD:
1402 case PGDEMOTE_DIRECT:
1403 case PGDEMOTE_KHUGEPAGED:
1404 #ifdef CONFIG_NUMA_BALANCING
1405 case PGPROMOTE_SUCCESS:
1406 #endif
1407 return 1;
1408 default:
1409 return memcg_page_state_unit(item);
1410 }
1411 }
1412
1413 unsigned long memcg_page_state_output(struct mem_cgroup *memcg, int item)
1414 {
1415 return memcg_page_state(memcg, item) *
1416 memcg_page_state_output_unit(item);
1417 }
1418
1419 unsigned long memcg_page_state_local_output(struct mem_cgroup *memcg, int item)
1420 {
1421 return memcg_page_state_local(memcg, item) *
1422 memcg_page_state_output_unit(item);
1423 }
1424
1425 static void memcg_stat_format(struct mem_cgroup *memcg, struct seq_buf *s)
1426 {
1427 int i;
1428
1429 /*
1430 * Provide statistics on the state of the memory subsystem as
1431 * well as cumulative event counters that show past behavior.
1432 *
1433 * This list is ordered following a combination of these gradients:
1434 * 1) generic big picture -> specifics and details
1435 * 2) reflecting userspace activity -> reflecting kernel heuristics
1436 *
1437 * Current memory state:
1438 */
1439 mem_cgroup_flush_stats(memcg);
1440
1441 for (i = 0; i < ARRAY_SIZE(memory_stats); i++) {
1442 u64 size;
1443
1444 size = memcg_page_state_output(memcg, memory_stats[i].idx);
1445 seq_buf_printf(s, "%s %llu\n", memory_stats[i].name, size);
1446
1447 if (unlikely(memory_stats[i].idx == NR_SLAB_UNRECLAIMABLE_B)) {
1448 size += memcg_page_state_output(memcg,
1449 NR_SLAB_RECLAIMABLE_B);
1450 seq_buf_printf(s, "slab %llu\n", size);
1451 }
1452 }
1453
1454 /* Accumulated memory events */
1455 seq_buf_printf(s, "pgscan %lu\n",
1456 memcg_events(memcg, PGSCAN_KSWAPD) +
1457 memcg_events(memcg, PGSCAN_DIRECT) +
1458 memcg_events(memcg, PGSCAN_KHUGEPAGED));
1459 seq_buf_printf(s, "pgsteal %lu\n",
1460 memcg_events(memcg, PGSTEAL_KSWAPD) +
1461 memcg_events(memcg, PGSTEAL_DIRECT) +
1462 memcg_events(memcg, PGSTEAL_KHUGEPAGED));
1463
1464 for (i = 0; i < ARRAY_SIZE(memcg_vm_event_stat); i++) {
1465 #ifdef CONFIG_MEMCG_V1
1466 if (memcg_vm_event_stat[i] == PGPGIN ||
1467 memcg_vm_event_stat[i] == PGPGOUT)
1468 continue;
1469 #endif
1470 seq_buf_printf(s, "%s %lu\n",
1471 vm_event_name(memcg_vm_event_stat[i]),
1472 memcg_events(memcg, memcg_vm_event_stat[i]));
1473 }
1474 }
1475
1476 static void memory_stat_format(struct mem_cgroup *memcg, struct seq_buf *s)
1477 {
1478 if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
1479 memcg_stat_format(memcg, s);
1480 else
1481 memcg1_stat_format(memcg, s);
1482 if (seq_buf_has_overflowed(s))
1483 pr_warn("%s: Warning, stat buffer overflow, please report\n", __func__);
1484 }
1485
1486 /**
1487 * mem_cgroup_print_oom_context: Print OOM information relevant to
1488 * memory controller.
1489 * @memcg: The memory cgroup that went over limit
1490 * @p: Task that is going to be killed
1491 *
1492 * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
1493 * enabled
1494 */
1495 void mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
1496 {
1497 rcu_read_lock();
1498
1499 if (memcg) {
1500 pr_cont(",oom_memcg=");
1501 pr_cont_cgroup_path(memcg->css.cgroup);
1502 } else
1503 pr_cont(",global_oom");
1504 if (p) {
1505 pr_cont(",task_memcg=");
1506 pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
1507 }
1508 rcu_read_unlock();
1509 }
1510
1511 /**
1512 * mem_cgroup_print_oom_meminfo: Print OOM memory information relevant to
1513 * memory controller.
1514 * @memcg: The memory cgroup that went over limit
1515 */
1516 void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
1517 {
1518 /* Use a static buffer, because the caller is holding oom_lock. */
1519 static char buf[PAGE_SIZE];
1520 struct seq_buf s;
1521
1522 lockdep_assert_held(&oom_lock);
1523
1524 pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n",
1525 K((u64)page_counter_read(&memcg->memory)),
1526 K((u64)READ_ONCE(memcg->memory.max)), memcg->memory.failcnt);
1527 if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
1528 pr_info("swap: usage %llukB, limit %llukB, failcnt %lu\n",
1529 K((u64)page_counter_read(&memcg->swap)),
1530 K((u64)READ_ONCE(memcg->swap.max)), memcg->swap.failcnt);
1531 #ifdef CONFIG_MEMCG_V1
1532 else {
1533 pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %lu\n",
1534 K((u64)page_counter_read(&memcg->memsw)),
1535 K((u64)memcg->memsw.max), memcg->memsw.failcnt);
1536 pr_info("kmem: usage %llukB, limit %llukB, failcnt %lu\n",
1537 K((u64)page_counter_read(&memcg->kmem)),
1538 K((u64)memcg->kmem.max), memcg->kmem.failcnt);
1539 }
1540 #endif
1541
1542 pr_info("Memory cgroup stats for ");
1543 pr_cont_cgroup_path(memcg->css.cgroup);
1544 pr_cont(":");
1545 seq_buf_init(&s, buf, sizeof(buf));
1546 memory_stat_format(memcg, &s);
1547 seq_buf_do_printk(&s, KERN_INFO);
1548 }
1549
1550 /*
1551 * Return the memory (and swap, if configured) limit for a memcg.
1552 */
1553 unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
1554 {
1555 unsigned long max = READ_ONCE(memcg->memory.max);
1556
1557 if (do_memsw_account()) {
1558 if (mem_cgroup_swappiness(memcg)) {
1559 /* Calculate swap excess capacity from memsw limit */
1560 unsigned long swap = READ_ONCE(memcg->memsw.max) - max;
1561
1562 max += min(swap, (unsigned long)total_swap_pages);
1563 }
1564 } else {
1565 if (mem_cgroup_swappiness(memcg))
1566 max += min(READ_ONCE(memcg->swap.max),
1567 (unsigned long)total_swap_pages);
1568 }
1569 return max;
1570 }
1571
1572 unsigned long mem_cgroup_size(struct mem_cgroup *memcg)
1573 {
1574 return page_counter_read(&memcg->memory);
1575 }
1576
1577 static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
1578 int order)
1579 {
1580 struct oom_control oc = {
1581 .zonelist = NULL,
1582 .nodemask = NULL,
1583 .memcg = memcg,
1584 .gfp_mask = gfp_mask,
1585 .order = order,
1586 };
1587 bool ret = true;
1588
1589 if (mutex_lock_killable(&oom_lock))
1590 return true;
1591
1592 if (mem_cgroup_margin(memcg) >= (1 << order))
1593 goto unlock;
1594
1595 /*
1596 * A few threads which were not waiting at mutex_lock_killable() can
1597 * fail to bail out. Therefore, check again after holding oom_lock.
1598 */
1599 ret = task_is_dying() || out_of_memory(&oc);
1600
1601 unlock:
1602 mutex_unlock(&oom_lock);
1603 return ret;
1604 }
1605
1606 /*
1607 * Returns true if one or more processes were successfully killed, though in
1608 * some corner cases it can return true even without killing any process.
1609 */
1610 static bool mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
1611 {
1612 bool locked, ret;
1613
1614 if (order > PAGE_ALLOC_COSTLY_ORDER)
1615 return false;
1616
1617 memcg_memory_event(memcg, MEMCG_OOM);
1618
1619 if (!memcg1_oom_prepare(memcg, &locked))
1620 return false;
1621
1622 ret = mem_cgroup_out_of_memory(memcg, mask, order);
1623
1624 memcg1_oom_finish(memcg, locked);
1625
1626 return ret;
1627 }
1628
1629 /**
1630 * mem_cgroup_get_oom_group - get a memory cgroup to clean up after OOM
1631 * @victim: task to be killed by the OOM killer
1632 * @oom_domain: memcg in case of memcg OOM, NULL in case of system-wide OOM
1633 *
1634 * Returns a pointer to a memory cgroup, which has to be cleaned up
1635 * by killing all of its OOM-killable tasks.
1636 *
1637 * Caller has to call mem_cgroup_put() on the returned non-NULL memcg.
1638 */
1639 struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim,
1640 struct mem_cgroup *oom_domain)
1641 {
1642 struct mem_cgroup *oom_group = NULL;
1643 struct mem_cgroup *memcg;
1644
1645 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
1646 return NULL;
1647
1648 if (!oom_domain)
1649 oom_domain = root_mem_cgroup;
1650
1651 rcu_read_lock();
1652
1653 memcg = mem_cgroup_from_task(victim);
1654 if (mem_cgroup_is_root(memcg))
1655 goto out;
1656
1657 /*
1658 * If the victim task has been asynchronously moved to a different
1659 * memory cgroup, we might end up killing tasks outside oom_domain.
1660 * In this case it's better to ignore memory.group.oom.
1661 */
1662 if (unlikely(!mem_cgroup_is_descendant(memcg, oom_domain)))
1663 goto out;
1664
1665 /*
1666 * Traverse the memory cgroup hierarchy from the victim task's
1667 * cgroup up to the OOMing cgroup (or root) to find the
1668 * highest-level memory cgroup with oom.group set.
1669 */
1670 for (; memcg; memcg = parent_mem_cgroup(memcg)) {
1671 if (READ_ONCE(memcg->oom_group))
1672 oom_group = memcg;
1673
1674 if (memcg == oom_domain)
1675 break;
1676 }
1677
1678 if (oom_group)
1679 css_get(&oom_group->css);
1680 out:
1681 rcu_read_unlock();
1682
1683 return oom_group;
1684 }
1685
1686 void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
1687 {
1688 pr_info("Tasks in ");
1689 pr_cont_cgroup_path(memcg->css.cgroup);
1690 pr_cont(" are going to be killed due to memory.oom.group set\n");
1691 }
1692
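/*
 * Per-CPU charge cache: a small reserve of pre-charged pages (and leftover
 * bytes for objcg allocations) so that most charges and uncharges avoid
 * touching the shared page counters.
 */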
1693 struct memcg_stock_pcp {
1694 local_lock_t stock_lock;
1695 struct mem_cgroup *cached; /* must never be the root cgroup */
1696 unsigned int nr_pages;
1697
1698 struct obj_cgroup *cached_objcg;
1699 struct pglist_data *cached_pgdat;
1700 unsigned int nr_bytes;
1701 int nr_slab_reclaimable_b;
1702 int nr_slab_unreclaimable_b;
1703
1704 struct work_struct work;
1705 unsigned long flags;
1706 #define FLUSHING_CACHED_CHARGE 0
1707 };
1708 static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock) = {
1709 .stock_lock = INIT_LOCAL_LOCK(stock_lock),
1710 };
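/* Serializes drain_all_stock() so only one system-wide drain runs at a time. */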
1711 static DEFINE_MUTEX(percpu_charge_mutex);
1712
1713 static struct obj_cgroup *drain_obj_stock(struct memcg_stock_pcp *stock);
1714 static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
1715 struct mem_cgroup *root_memcg);
1716
1717 /**
1718 * consume_stock: Try to consume stocked charge on this cpu.
1719 * @memcg: memcg to consume from.
1720 * @nr_pages: how many pages to charge.
1721 *
1722 * The charges will only happen if @memcg matches the current cpu's memcg
1723 * stock, and at least @nr_pages are available in that stock. Failure to
1724 * service an allocation will refill the stock.
1725 *
1726 * returns true if successful, false otherwise.
1727 */
1728 static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
1729 {
1730 struct memcg_stock_pcp *stock;
1731 unsigned int stock_pages;
1732 unsigned long flags;
1733 bool ret = false;
1734
1735 if (nr_pages > MEMCG_CHARGE_BATCH)
1736 return ret;
1737
1738 local_lock_irqsave(&memcg_stock.stock_lock, flags);
1739
1740 stock = this_cpu_ptr(&memcg_stock);
1741 stock_pages = READ_ONCE(stock->nr_pages);
1742 if (memcg == READ_ONCE(stock->cached) && stock_pages >= nr_pages) {
1743 WRITE_ONCE(stock->nr_pages, stock_pages - nr_pages);
1744 ret = true;
1745 }
1746
1747 local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
1748
1749 return ret;
1750 }
1751
1752 /*
1753 * Return the stocked charges to the page counters and reset the cached information.
1754 */
1755 static void drain_stock(struct memcg_stock_pcp *stock)
1756 {
1757 unsigned int stock_pages = READ_ONCE(stock->nr_pages);
1758 struct mem_cgroup *old = READ_ONCE(stock->cached);
1759
1760 if (!old)
1761 return;
1762
1763 if (stock_pages) {
1764 page_counter_uncharge(&old->memory, stock_pages);
1765 if (do_memsw_account())
1766 page_counter_uncharge(&old->memsw, stock_pages);
1767
1768 WRITE_ONCE(stock->nr_pages, 0);
1769 }
1770
1771 css_put(&old->css);
1772 WRITE_ONCE(stock->cached, NULL);
1773 }
1774
1775 static void drain_local_stock(struct work_struct *dummy)
1776 {
1777 struct memcg_stock_pcp *stock;
1778 struct obj_cgroup *old = NULL;
1779 unsigned long flags;
1780
1781 /*
1782 * The only protection from CPU hotplug (memcg_hotplug_cpu_dead) vs.
1783 * drain_stock races is that we always operate on the local CPU stock
1784 * here with IRQs disabled.
1785 */
1786 local_lock_irqsave(&memcg_stock.stock_lock, flags);
1787
1788 stock = this_cpu_ptr(&memcg_stock);
1789 old = drain_obj_stock(stock);
1790 drain_stock(stock);
1791 clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
1792
1793 local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
1794 obj_cgroup_put(old);
1795 }
1796
1797 /*
1798 * Cache charges (nr_pages) in the local per-CPU area.
1799 * They will be consumed by consume_stock() later.
1800 */
__refill_stock(struct mem_cgroup * memcg,unsigned int nr_pages)1801 static void __refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
1802 {
1803 struct memcg_stock_pcp *stock;
1804 unsigned int stock_pages;
1805
1806 stock = this_cpu_ptr(&memcg_stock);
1807 if (READ_ONCE(stock->cached) != memcg) { /* reset if necessary */
1808 drain_stock(stock);
1809 css_get(&memcg->css);
1810 WRITE_ONCE(stock->cached, memcg);
1811 }
1812 stock_pages = READ_ONCE(stock->nr_pages) + nr_pages;
1813 WRITE_ONCE(stock->nr_pages, stock_pages);
1814
1815 if (stock_pages > MEMCG_CHARGE_BATCH)
1816 drain_stock(stock);
1817 }
1818
refill_stock(struct mem_cgroup * memcg,unsigned int nr_pages)1819 static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
1820 {
1821 unsigned long flags;
1822
1823 local_lock_irqsave(&memcg_stock.stock_lock, flags);
1824 __refill_stock(memcg, nr_pages);
1825 local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
1826 }
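/*
 * Illustrative note (editorial addition, assuming MEMCG_CHARGE_BATCH has its
 * usual value of 64 pages): a single-page charge that misses the stock makes
 * try_charge_memcg() charge a full 64-page batch against the page counters
 * and refill the remaining 63 pages into this per-CPU stock. The next 63
 * single-page charges on this CPU are then served by consume_stock() without
 * touching the hierarchical page counters at all.
 */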
1827
1828 /*
1829 * Drain all per-CPU charge caches for the given root_memcg and the
1830 * subtree of the hierarchy under it.
1831 */
drain_all_stock(struct mem_cgroup * root_memcg)1832 void drain_all_stock(struct mem_cgroup *root_memcg)
1833 {
1834 int cpu, curcpu;
1835
1836 /* If someone's already draining, avoid running more workers. */
1837 if (!mutex_trylock(&percpu_charge_mutex))
1838 return;
1839 /*
1840 * Notify other cpus that a system-wide "drain" is running.
1841 * We do not care about races with the cpu hotplug because cpu down
1842 * as well as workers from this path always operate on the local
1843 * per-cpu data. CPU up doesn't touch memcg_stock at all.
1844 */
1845 migrate_disable();
1846 curcpu = smp_processor_id();
1847 for_each_online_cpu(cpu) {
1848 struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
1849 struct mem_cgroup *memcg;
1850 bool flush = false;
1851
1852 rcu_read_lock();
1853 memcg = READ_ONCE(stock->cached);
1854 if (memcg && READ_ONCE(stock->nr_pages) &&
1855 mem_cgroup_is_descendant(memcg, root_memcg))
1856 flush = true;
1857 else if (obj_stock_flush_required(stock, root_memcg))
1858 flush = true;
1859 rcu_read_unlock();
1860
1861 if (flush &&
1862 !test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
1863 if (cpu == curcpu)
1864 drain_local_stock(&stock->work);
1865 else if (!cpu_is_isolated(cpu))
1866 schedule_work_on(cpu, &stock->work);
1867 }
1868 }
1869 migrate_enable();
1870 mutex_unlock(&percpu_charge_mutex);
1871 }
1872
memcg_hotplug_cpu_dead(unsigned int cpu)1873 static int memcg_hotplug_cpu_dead(unsigned int cpu)
1874 {
1875 struct memcg_stock_pcp *stock;
1876
1877 stock = &per_cpu(memcg_stock, cpu);
1878 drain_stock(stock);
1879
1880 return 0;
1881 }
1882
reclaim_high(struct mem_cgroup * memcg,unsigned int nr_pages,gfp_t gfp_mask)1883 static unsigned long reclaim_high(struct mem_cgroup *memcg,
1884 unsigned int nr_pages,
1885 gfp_t gfp_mask)
1886 {
1887 unsigned long nr_reclaimed = 0;
1888
1889 do {
1890 unsigned long pflags;
1891
1892 if (page_counter_read(&memcg->memory) <=
1893 READ_ONCE(memcg->memory.high))
1894 continue;
1895
1896 memcg_memory_event(memcg, MEMCG_HIGH);
1897
1898 psi_memstall_enter(&pflags);
1899 nr_reclaimed += try_to_free_mem_cgroup_pages(memcg, nr_pages,
1900 gfp_mask,
1901 MEMCG_RECLAIM_MAY_SWAP,
1902 NULL);
1903 psi_memstall_leave(&pflags);
1904 } while ((memcg = parent_mem_cgroup(memcg)) &&
1905 !mem_cgroup_is_root(memcg));
1906
1907 return nr_reclaimed;
1908 }
1909
high_work_func(struct work_struct * work)1910 static void high_work_func(struct work_struct *work)
1911 {
1912 struct mem_cgroup *memcg;
1913
1914 memcg = container_of(work, struct mem_cgroup, high_work);
1915 reclaim_high(memcg, MEMCG_CHARGE_BATCH, GFP_KERNEL);
1916 }
1917
1918 /*
1919 * Clamp the maximum sleep time per allocation batch to 2 seconds. This is
1920 * enough to still cause a significant slowdown in most cases, while
1921 * allowing diagnostics and tracing to proceed without becoming stuck.
1922 */
1923 #define MEMCG_MAX_HIGH_DELAY_JIFFIES (2UL*HZ)
1924
1925 /*
1926 * When calculating the delay, we use these on either side of the exponentiation to
1927 * maintain precision and scale to a reasonable number of jiffies (see the table
1928 * below).
1929 *
1930 * - MEMCG_DELAY_PRECISION_SHIFT: Extra precision bits while translating the
1931 * overage ratio to a delay.
1932 * - MEMCG_DELAY_SCALING_SHIFT: The number of bits to scale down the
1933 * proposed penalty in order to reduce to a reasonable number of jiffies, and
1934 * to produce a reasonable delay curve.
1935 *
1936 * MEMCG_DELAY_SCALING_SHIFT just happens to be a number that produces a
1937 * reasonable delay curve compared to precision-adjusted overage, not
1938 * penalising heavily at first, but still making sure that growth beyond the
1939 * limit penalises misbehaving cgroups by slowing them down exponentially. For
1940 * example, with a high of 100 megabytes:
1941 *
1942 * +-------+------------------------+
1943 * | usage | time to allocate in ms |
1944 * +-------+------------------------+
1945 * | 100M | 0 |
1946 * | 101M | 6 |
1947 * | 102M | 25 |
1948 * | 103M | 57 |
1949 * | 104M | 102 |
1950 * | 105M | 159 |
1951 * | 106M | 230 |
1952 * | 107M | 313 |
1953 * | 108M | 409 |
1954 * | 109M | 518 |
1955 * | 110M | 639 |
1956 * | 111M | 774 |
1957 * | 112M | 921 |
1958 * | 113M | 1081 |
1959 * | 114M | 1254 |
1960 * | 115M | 1439 |
1961 * | 116M | 1638 |
1962 * | 117M | 1849 |
1963 * | 118M | 2000 |
1964 * | 119M | 2000 |
1965 * | 120M | 2000 |
1966 * +-------+------------------------+
1967 */
1968 #define MEMCG_DELAY_PRECISION_SHIFT 20
1969 #define MEMCG_DELAY_SCALING_SHIFT 14
1970
calculate_overage(unsigned long usage,unsigned long high)1971 static u64 calculate_overage(unsigned long usage, unsigned long high)
1972 {
1973 u64 overage;
1974
1975 if (usage <= high)
1976 return 0;
1977
1978 /*
1979 * Prevent division by 0 in overage calculation by acting as if
1980 * it was a threshold of 1 page
1981 */
1982 high = max(high, 1UL);
1983
1984 overage = usage - high;
1985 overage <<= MEMCG_DELAY_PRECISION_SHIFT;
1986 return div64_u64(overage, high);
1987 }
1988
mem_find_max_overage(struct mem_cgroup * memcg)1989 static u64 mem_find_max_overage(struct mem_cgroup *memcg)
1990 {
1991 u64 overage, max_overage = 0;
1992
1993 do {
1994 overage = calculate_overage(page_counter_read(&memcg->memory),
1995 READ_ONCE(memcg->memory.high));
1996 max_overage = max(overage, max_overage);
1997 } while ((memcg = parent_mem_cgroup(memcg)) &&
1998 !mem_cgroup_is_root(memcg));
1999
2000 return max_overage;
2001 }
2002
swap_find_max_overage(struct mem_cgroup * memcg)2003 static u64 swap_find_max_overage(struct mem_cgroup *memcg)
2004 {
2005 u64 overage, max_overage = 0;
2006
2007 do {
2008 overage = calculate_overage(page_counter_read(&memcg->swap),
2009 READ_ONCE(memcg->swap.high));
2010 if (overage)
2011 memcg_memory_event(memcg, MEMCG_SWAP_HIGH);
2012 max_overage = max(overage, max_overage);
2013 } while ((memcg = parent_mem_cgroup(memcg)) &&
2014 !mem_cgroup_is_root(memcg));
2015
2016 return max_overage;
2017 }
2018
2019 /*
2020 * Get the number of jiffies that we should penalise a mischievous cgroup which
2021 * is exceeding its memory.high by checking both it and its ancestors.
2022 */
calculate_high_delay(struct mem_cgroup * memcg,unsigned int nr_pages,u64 max_overage)2023 static unsigned long calculate_high_delay(struct mem_cgroup *memcg,
2024 unsigned int nr_pages,
2025 u64 max_overage)
2026 {
2027 unsigned long penalty_jiffies;
2028
2029 if (!max_overage)
2030 return 0;
2031
2032 /*
2033 * We use overage compared to memory.high to calculate the number of
2034 * jiffies to sleep (penalty_jiffies). Ideally this value should be
2035 * fairly lenient on small overages, and increasingly harsh when the
2036 * memcg in question makes it clear that it has no intention of stopping
2037 * its crazy behaviour, so we exponentially increase the delay based on
2038 * overage amount.
2039 */
2040 penalty_jiffies = max_overage * max_overage * HZ;
2041 penalty_jiffies >>= MEMCG_DELAY_PRECISION_SHIFT;
2042 penalty_jiffies >>= MEMCG_DELAY_SCALING_SHIFT;
2043
2044 /*
2045 * Factor in the task's own contribution to the overage, such that four
2046 * N-sized allocations are throttled approximately the same as one
2047 * 4N-sized allocation.
2048 *
2049 * MEMCG_CHARGE_BATCH pages is nominal, so work out how much smaller or
2050 * larger the current charge batch is than that.
2051 */
2052 return penalty_jiffies * nr_pages / MEMCG_CHARGE_BATCH;
2053 }
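/*
 * Worked example (editorial addition, derived from the shifts and table
 * above): with memory.high = 100M and usage = 110M, calculate_overage()
 * returns (10M << MEMCG_DELAY_PRECISION_SHIFT) / 100M ~= 0.1 * 2^20 = 104857.
 * The raw penalty is then 104857^2 * HZ >> (20 + 14) ~= 0.64 * HZ jiffies,
 * i.e. roughly 640ms for a full MEMCG_CHARGE_BATCH worth of pages, matching
 * the "110M | 639" row of the table (the small difference is integer
 * truncation).
 */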
2054
2055 /*
2056 * Reclaims memory over the high limit. Called directly from
2057 * try_charge() (context permitting), as well as from the userland
2058 * return path where reclaim is always able to block.
2059 */
mem_cgroup_handle_over_high(gfp_t gfp_mask)2060 void mem_cgroup_handle_over_high(gfp_t gfp_mask)
2061 {
2062 unsigned long penalty_jiffies;
2063 unsigned long pflags;
2064 unsigned long nr_reclaimed;
2065 unsigned int nr_pages = current->memcg_nr_pages_over_high;
2066 int nr_retries = MAX_RECLAIM_RETRIES;
2067 struct mem_cgroup *memcg;
2068 bool in_retry = false;
2069
2070 if (likely(!nr_pages))
2071 return;
2072
2073 memcg = get_mem_cgroup_from_mm(current->mm);
2074 current->memcg_nr_pages_over_high = 0;
2075
2076 retry_reclaim:
2077 /*
2078 * Bail if the task is already exiting. Unlike memory.max,
2079 * memory.high enforcement isn't as strict, and there is no
2080 * OOM killer involved, which means the excess could already
2081 * be much bigger (and still growing) than it could for
2082 * memory.max; the dying task could get stuck in fruitless
2083 * reclaim for a long time, which isn't desirable.
2084 */
2085 if (task_is_dying())
2086 goto out;
2087
2088 /*
2089 * The allocating task should reclaim at least the batch size, but for
2090 * subsequent retries we only want to do what's necessary to prevent oom
2091 * or breaching resource isolation.
2092 *
2093 * This is distinct from memory.max or page allocator behaviour because
2094 * memory.high is currently batched, whereas memory.max and the page
2095 * allocator run every time an allocation is made.
2096 */
2097 nr_reclaimed = reclaim_high(memcg,
2098 in_retry ? SWAP_CLUSTER_MAX : nr_pages,
2099 gfp_mask);
2100
2101 /*
2102 * memory.high is breached and reclaim is unable to keep up. Throttle
2103 * allocators proactively to slow down excessive growth.
2104 */
2105 penalty_jiffies = calculate_high_delay(memcg, nr_pages,
2106 mem_find_max_overage(memcg));
2107
2108 penalty_jiffies += calculate_high_delay(memcg, nr_pages,
2109 swap_find_max_overage(memcg));
2110
2111 /*
2112 * Clamp the max delay per usermode return so as to still keep the
2113 * application moving forwards and also permit diagnostics, albeit
2114 * extremely slowly.
2115 */
2116 penalty_jiffies = min(penalty_jiffies, MEMCG_MAX_HIGH_DELAY_JIFFIES);
2117
2118 /*
2119 * Don't sleep if the amount of jiffies this memcg owes us is so low
2120 * that it's not even worth doing, in an attempt to be nice to those who
2121 * go only a small amount over their memory.high value and maybe haven't
2122 * been aggressively reclaimed enough yet.
2123 */
2124 if (penalty_jiffies <= HZ / 100)
2125 goto out;
2126
2127 /*
2128 * If reclaim is making forward progress but we're still over
2129 * memory.high, we want to encourage that rather than doing allocator
2130 * throttling.
2131 */
2132 if (nr_reclaimed || nr_retries--) {
2133 in_retry = true;
2134 goto retry_reclaim;
2135 }
2136
2137 /*
2138 * Reclaim didn't manage to push usage below the limit, slow
2139 * this allocating task down.
2140 *
2141 * If we exit early, we're guaranteed to die (since
2142 * schedule_timeout_killable sets TASK_KILLABLE). This means we don't
2143 * need to account for any ill-begotten jiffies to pay them off later.
2144 */
2145 psi_memstall_enter(&pflags);
2146 schedule_timeout_killable(penalty_jiffies);
2147 psi_memstall_leave(&pflags);
2148
2149 out:
2150 css_put(&memcg->css);
2151 }
2152
try_charge_memcg(struct mem_cgroup * memcg,gfp_t gfp_mask,unsigned int nr_pages)2153 int try_charge_memcg(struct mem_cgroup *memcg, gfp_t gfp_mask,
2154 unsigned int nr_pages)
2155 {
2156 unsigned int batch = max(MEMCG_CHARGE_BATCH, nr_pages);
2157 int nr_retries = MAX_RECLAIM_RETRIES;
2158 struct mem_cgroup *mem_over_limit;
2159 struct page_counter *counter;
2160 unsigned long nr_reclaimed;
2161 bool passed_oom = false;
2162 unsigned int reclaim_options = MEMCG_RECLAIM_MAY_SWAP;
2163 bool drained = false;
2164 bool raised_max_event = false;
2165 unsigned long pflags;
2166
2167 retry:
2168 if (consume_stock(memcg, nr_pages))
2169 return 0;
2170
2171 if (!do_memsw_account() ||
2172 page_counter_try_charge(&memcg->memsw, batch, &counter)) {
2173 if (page_counter_try_charge(&memcg->memory, batch, &counter))
2174 goto done_restock;
2175 if (do_memsw_account())
2176 page_counter_uncharge(&memcg->memsw, batch);
2177 mem_over_limit = mem_cgroup_from_counter(counter, memory);
2178 } else {
2179 mem_over_limit = mem_cgroup_from_counter(counter, memsw);
2180 reclaim_options &= ~MEMCG_RECLAIM_MAY_SWAP;
2181 }
2182
2183 if (batch > nr_pages) {
2184 batch = nr_pages;
2185 goto retry;
2186 }
2187
2188 /*
2189 * Prevent unbounded recursion when reclaim operations need to
2190 * allocate memory. This might exceed the limits temporarily,
2191 * but we prefer facilitating memory reclaim and getting back
2192 * under the limit over triggering OOM kills in these cases.
2193 */
2194 if (unlikely(current->flags & PF_MEMALLOC))
2195 goto force;
2196
2197 if (unlikely(task_in_memcg_oom(current)))
2198 goto nomem;
2199
2200 if (!gfpflags_allow_blocking(gfp_mask))
2201 goto nomem;
2202
2203 memcg_memory_event(mem_over_limit, MEMCG_MAX);
2204 raised_max_event = true;
2205
2206 psi_memstall_enter(&pflags);
2207 nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages,
2208 gfp_mask, reclaim_options, NULL);
2209 psi_memstall_leave(&pflags);
2210
2211 if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
2212 goto retry;
2213
2214 if (!drained) {
2215 drain_all_stock(mem_over_limit);
2216 drained = true;
2217 goto retry;
2218 }
2219
2220 if (gfp_mask & __GFP_NORETRY)
2221 goto nomem;
2222 /*
2223 * Even though the limit is exceeded at this point, reclaim
2224 * may have been able to free some pages. Retry the charge
2225 * before killing the task.
2226 *
2227 * Only for regular pages, though: huge pages are rather
2228 * unlikely to succeed so close to the limit, and we fall back
2229 * to regular pages anyway in case of failure.
2230 */
2231 if (nr_reclaimed && nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER))
2232 goto retry;
2233 /*
2234 * During a task move, charges can be double-counted. So it's better
2235 * to wait until the end of the task move if one is in progress.
2236 */
2237 if (memcg1_wait_acct_move(mem_over_limit))
2238 goto retry;
2239
2240 if (nr_retries--)
2241 goto retry;
2242
2243 if (gfp_mask & __GFP_RETRY_MAYFAIL)
2244 goto nomem;
2245
2246 /* Avoid endless loop for tasks bypassed by the oom killer */
2247 if (passed_oom && task_is_dying())
2248 goto nomem;
2249
2250 /*
2251 * keep retrying as long as the memcg oom killer is able to make
2252 * a forward progress or bypass the charge if the oom killer
2253 * couldn't make any progress.
2254 */
2255 if (mem_cgroup_oom(mem_over_limit, gfp_mask,
2256 get_order(nr_pages * PAGE_SIZE))) {
2257 passed_oom = true;
2258 nr_retries = MAX_RECLAIM_RETRIES;
2259 goto retry;
2260 }
2261 nomem:
2262 /*
2263 * Memcg doesn't have a dedicated reserve for atomic
2264 * allocations. But like the global atomic pool, we need to
2265 * put the burden of reclaim on regular allocation requests
2266 * and let these go through as privileged allocations.
2267 */
2268 if (!(gfp_mask & (__GFP_NOFAIL | __GFP_HIGH)))
2269 return -ENOMEM;
2270 force:
2271 /*
2272 * If the allocation has to be enforced, don't forget to raise
2273 * a MEMCG_MAX event.
2274 */
2275 if (!raised_max_event)
2276 memcg_memory_event(mem_over_limit, MEMCG_MAX);
2277
2278 /*
2279 * The allocation either can't fail or will lead to more memory
2280 * being freed very soon. Allow memory usage to go over the limit
2281 * temporarily by force charging it.
2282 */
2283 page_counter_charge(&memcg->memory, nr_pages);
2284 if (do_memsw_account())
2285 page_counter_charge(&memcg->memsw, nr_pages);
2286
2287 return 0;
2288
2289 done_restock:
2290 if (batch > nr_pages)
2291 refill_stock(memcg, batch - nr_pages);
2292
2293 /*
2294 * If the hierarchy is above the normal consumption range, schedule
2295 * reclaim on returning to userland. We can perform reclaim here
2296 * if __GFP_RECLAIM is set, but let's always punt for simplicity and so that
2297 * GFP_KERNEL can consistently be used during reclaim. @memcg is
2298 * not recorded as it most likely matches current's and won't
2299 * change in the meantime. As high limit is checked again before
2300 * reclaim, the cost of mismatch is negligible.
2301 */
2302 do {
2303 bool mem_high, swap_high;
2304
2305 mem_high = page_counter_read(&memcg->memory) >
2306 READ_ONCE(memcg->memory.high);
2307 swap_high = page_counter_read(&memcg->swap) >
2308 READ_ONCE(memcg->swap.high);
2309
2310 /* Don't bother a random interrupted task */
2311 if (!in_task()) {
2312 if (mem_high) {
2313 schedule_work(&memcg->high_work);
2314 break;
2315 }
2316 continue;
2317 }
2318
2319 if (mem_high || swap_high) {
2320 /*
2321 * The allocating tasks in this cgroup will need to do
2322 * reclaim or be throttled to prevent further growth
2323 * of the memory or swap footprints.
2324 *
2325 * Target some best-effort fairness between the tasks,
2326 * and distribute reclaim work and delay penalties
2327 * based on how much each task is actually allocating.
2328 */
2329 current->memcg_nr_pages_over_high += batch;
2330 set_notify_resume(current);
2331 break;
2332 }
2333 } while ((memcg = parent_mem_cgroup(memcg)));
2334
2335 /*
2336 * Reclaim is set up above to be called from the userland
2337 * return path. But also attempt synchronous reclaim to avoid
2338 * excessive overrun while the task is still inside the
2339 * kernel. If this is successful, the return path will see it
2340 * when it rechecks the overage and simply bail out.
2341 */
2342 if (current->memcg_nr_pages_over_high > MEMCG_CHARGE_BATCH &&
2343 !(current->flags & PF_MEMALLOC) &&
2344 gfpflags_allow_blocking(gfp_mask))
2345 mem_cgroup_handle_over_high(gfp_mask);
2346 return 0;
2347 }
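/*
 * Illustrative summary (editorial addition): the charge path above escalates
 * roughly as: per-CPU stock -> hierarchical page counters -> direct reclaim
 * -> draining all per-CPU stocks -> bounded retries -> memcg OOM killer ->
 * forced overcharge (PF_MEMALLOC, __GFP_NOFAIL or __GFP_HIGH callers), while
 * memory.high throttling is handled separately at done_restock.
 */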
2348
2349 /**
2350 * mem_cgroup_cancel_charge() - cancel an uncommitted try_charge() call.
2351 * @memcg: memcg previously charged.
2352 * @nr_pages: number of pages previously charged.
2353 */
mem_cgroup_cancel_charge(struct mem_cgroup * memcg,unsigned int nr_pages)2354 void mem_cgroup_cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages)
2355 {
2356 if (mem_cgroup_is_root(memcg))
2357 return;
2358
2359 page_counter_uncharge(&memcg->memory, nr_pages);
2360 if (do_memsw_account())
2361 page_counter_uncharge(&memcg->memsw, nr_pages);
2362 }
2363
commit_charge(struct folio * folio,struct mem_cgroup * memcg)2364 static void commit_charge(struct folio *folio, struct mem_cgroup *memcg)
2365 {
2366 VM_BUG_ON_FOLIO(folio_memcg_charged(folio), folio);
2367 /*
2368 * Any of the following ensures page's memcg stability:
2369 *
2370 * - the page lock
2371 * - LRU isolation
2372 * - folio_memcg_lock()
2373 * - exclusive reference
2374 * - mem_cgroup_trylock_pages()
2375 */
2376 folio->memcg_data = (unsigned long)memcg;
2377 }
2378
2379 /**
2380 * mem_cgroup_commit_charge - commit a previously successful try_charge().
2381 * @folio: folio to commit the charge to.
2382 * @memcg: memcg previously charged.
2383 */
mem_cgroup_commit_charge(struct folio * folio,struct mem_cgroup * memcg)2384 void mem_cgroup_commit_charge(struct folio *folio, struct mem_cgroup *memcg)
2385 {
2386 css_get(&memcg->css);
2387 commit_charge(folio, memcg);
2388 memcg1_commit_charge(folio, memcg);
2389 }
2390
__mod_objcg_mlstate(struct obj_cgroup * objcg,struct pglist_data * pgdat,enum node_stat_item idx,int nr)2391 static inline void __mod_objcg_mlstate(struct obj_cgroup *objcg,
2392 struct pglist_data *pgdat,
2393 enum node_stat_item idx, int nr)
2394 {
2395 struct mem_cgroup *memcg;
2396 struct lruvec *lruvec;
2397
2398 rcu_read_lock();
2399 memcg = obj_cgroup_memcg(objcg);
2400 lruvec = mem_cgroup_lruvec(memcg, pgdat);
2401 __mod_memcg_lruvec_state(lruvec, idx, nr);
2402 rcu_read_unlock();
2403 }
2404
2405 static __always_inline
mem_cgroup_from_obj_folio(struct folio * folio,void * p)2406 struct mem_cgroup *mem_cgroup_from_obj_folio(struct folio *folio, void *p)
2407 {
2408 /*
2409 * Slab objects are accounted individually, not per-page.
2410 * Memcg membership data for each individual object is saved in
2411 * slab->obj_exts.
2412 */
2413 if (folio_test_slab(folio)) {
2414 struct slabobj_ext *obj_exts;
2415 struct slab *slab;
2416 unsigned int off;
2417
2418 slab = folio_slab(folio);
2419 obj_exts = slab_obj_exts(slab);
2420 if (!obj_exts)
2421 return NULL;
2422
2423 off = obj_to_index(slab->slab_cache, slab, p);
2424 if (obj_exts[off].objcg)
2425 return obj_cgroup_memcg(obj_exts[off].objcg);
2426
2427 return NULL;
2428 }
2429
2430 /*
2431 * folio_memcg_check() is used here, because in theory we can encounter
2432 * a folio where the slab flag has been cleared already, but
2433 * slab->obj_exts has not been freed yet.
2434 * folio_memcg_check() will guarantee that a proper memory
2435 * cgroup pointer or NULL will be returned.
2436 */
2437 return folio_memcg_check(folio);
2438 }
2439
2440 /*
2441 * Returns a pointer to the memory cgroup to which the kernel object is charged.
2442 * It is not suitable for objects allocated using vmalloc().
2443 *
2444 * A passed kernel object must be a slab object or a generic kernel page.
2445 *
2446 * The caller must ensure the memcg lifetime, e.g. by taking rcu_read_lock(),
2447 * cgroup_mutex, etc.
2448 */
mem_cgroup_from_slab_obj(void * p)2449 struct mem_cgroup *mem_cgroup_from_slab_obj(void *p)
2450 {
2451 if (mem_cgroup_disabled())
2452 return NULL;
2453
2454 return mem_cgroup_from_obj_folio(virt_to_folio(p), p);
2455 }
2456
__get_obj_cgroup_from_memcg(struct mem_cgroup * memcg)2457 static struct obj_cgroup *__get_obj_cgroup_from_memcg(struct mem_cgroup *memcg)
2458 {
2459 struct obj_cgroup *objcg = NULL;
2460
2461 for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg)) {
2462 objcg = rcu_dereference(memcg->objcg);
2463 if (likely(objcg && obj_cgroup_tryget(objcg)))
2464 break;
2465 objcg = NULL;
2466 }
2467 return objcg;
2468 }
2469
current_objcg_update(void)2470 static struct obj_cgroup *current_objcg_update(void)
2471 {
2472 struct mem_cgroup *memcg;
2473 struct obj_cgroup *old, *objcg = NULL;
2474
2475 do {
2476 /* Atomically drop the update bit. */
2477 old = xchg(&current->objcg, NULL);
2478 if (old) {
2479 old = (struct obj_cgroup *)
2480 ((unsigned long)old & ~CURRENT_OBJCG_UPDATE_FLAG);
2481 obj_cgroup_put(old);
2482
2483 old = NULL;
2484 }
2485
2486 /* If new objcg is NULL, no reason for the second atomic update. */
2487 if (!current->mm || (current->flags & PF_KTHREAD))
2488 return NULL;
2489
2490 /*
2491 * Release the objcg pointer from the previous iteration,
2492 * if try_cmpxchg() below fails.
2493 */
2494 if (unlikely(objcg)) {
2495 obj_cgroup_put(objcg);
2496 objcg = NULL;
2497 }
2498
2499 /*
2500 * Obtain the new objcg pointer. The current task can be
2501 * asynchronously moved to another memcg and the previous
2502 * memcg can be offlined. So let's get the memcg pointer
2503 * and try to get a reference to the objcg under an RCU read lock.
2504 */
2505
2506 rcu_read_lock();
2507 memcg = mem_cgroup_from_task(current);
2508 objcg = __get_obj_cgroup_from_memcg(memcg);
2509 rcu_read_unlock();
2510
2511 /*
2512 * Try to set up a new objcg pointer atomically. If it
2513 * fails, it means the update flag was set concurrently, so
2514 * the whole procedure should be repeated.
2515 */
2516 } while (!try_cmpxchg(&current->objcg, &old, objcg));
2517
2518 return objcg;
2519 }
2520
current_obj_cgroup(void)2521 __always_inline struct obj_cgroup *current_obj_cgroup(void)
2522 {
2523 struct mem_cgroup *memcg;
2524 struct obj_cgroup *objcg;
2525
2526 if (in_task()) {
2527 memcg = current->active_memcg;
2528 if (unlikely(memcg))
2529 goto from_memcg;
2530
2531 objcg = READ_ONCE(current->objcg);
2532 if (unlikely((unsigned long)objcg & CURRENT_OBJCG_UPDATE_FLAG))
2533 objcg = current_objcg_update();
2534 /*
2535 * The objcg reference is kept by the task, so it's safe
2536 * for the current task to use the objcg.
2537 */
2538 return objcg;
2539 }
2540
2541 memcg = this_cpu_read(int_active_memcg);
2542 if (unlikely(memcg))
2543 goto from_memcg;
2544
2545 return NULL;
2546
2547 from_memcg:
2548 objcg = NULL;
2549 for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg)) {
2550 /*
2551 * Memcg pointer is protected by scope (see set_active_memcg())
2552 * and is pinning the corresponding objcg, so objcg can't go
2553 * away and can be used within the scope without any additional
2554 * protection.
2555 */
2556 objcg = rcu_dereference_check(memcg->objcg, 1);
2557 if (likely(objcg))
2558 break;
2559 }
2560
2561 return objcg;
2562 }
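/*
 * Illustrative note (editorial addition): CURRENT_OBJCG_UPDATE_FLAG is the
 * lowest bit of current->objcg, used as an "update pending" marker, e.g. when
 * the task has been moved to another memcg. When current_obj_cgroup() sees
 * the bit set, it falls back to current_objcg_update() above, which
 * atomically clears the marker, drops the stale objcg reference and
 * re-resolves the objcg from the task's current memcg.
 */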
2563
get_obj_cgroup_from_folio(struct folio * folio)2564 struct obj_cgroup *get_obj_cgroup_from_folio(struct folio *folio)
2565 {
2566 struct obj_cgroup *objcg;
2567
2568 if (!memcg_kmem_online())
2569 return NULL;
2570
2571 if (folio_memcg_kmem(folio)) {
2572 objcg = __folio_objcg(folio);
2573 obj_cgroup_get(objcg);
2574 } else {
2575 struct mem_cgroup *memcg;
2576
2577 rcu_read_lock();
2578 memcg = __folio_memcg(folio);
2579 if (memcg)
2580 objcg = __get_obj_cgroup_from_memcg(memcg);
2581 else
2582 objcg = NULL;
2583 rcu_read_unlock();
2584 }
2585 return objcg;
2586 }
2587
2588 /*
2589 * obj_cgroup_uncharge_pages: uncharge a number of kernel pages from an objcg
2590 * @objcg: object cgroup to uncharge
2591 * @nr_pages: number of pages to uncharge
2592 */
obj_cgroup_uncharge_pages(struct obj_cgroup * objcg,unsigned int nr_pages)2593 static void obj_cgroup_uncharge_pages(struct obj_cgroup *objcg,
2594 unsigned int nr_pages)
2595 {
2596 struct mem_cgroup *memcg;
2597
2598 memcg = get_mem_cgroup_from_objcg(objcg);
2599
2600 mod_memcg_state(memcg, MEMCG_KMEM, -nr_pages);
2601 memcg1_account_kmem(memcg, -nr_pages);
2602 refill_stock(memcg, nr_pages);
2603
2604 css_put(&memcg->css);
2605 }
2606
2607 /*
2608 * obj_cgroup_charge_pages: charge a number of kernel pages to an objcg
2609 * @objcg: object cgroup to charge
2610 * @gfp: reclaim mode
2611 * @nr_pages: number of pages to charge
2612 *
2613 * Returns 0 on success, an error code on failure.
2614 */
obj_cgroup_charge_pages(struct obj_cgroup * objcg,gfp_t gfp,unsigned int nr_pages)2615 static int obj_cgroup_charge_pages(struct obj_cgroup *objcg, gfp_t gfp,
2616 unsigned int nr_pages)
2617 {
2618 struct mem_cgroup *memcg;
2619 int ret;
2620
2621 memcg = get_mem_cgroup_from_objcg(objcg);
2622
2623 ret = try_charge_memcg(memcg, gfp, nr_pages);
2624 if (ret)
2625 goto out;
2626
2627 mod_memcg_state(memcg, MEMCG_KMEM, nr_pages);
2628 memcg1_account_kmem(memcg, nr_pages);
2629 out:
2630 css_put(&memcg->css);
2631
2632 return ret;
2633 }
2634
2635 /**
2636 * __memcg_kmem_charge_page: charge a kmem page to the current memory cgroup
2637 * @page: page to charge
2638 * @gfp: reclaim mode
2639 * @order: allocation order
2640 *
2641 * Returns 0 on success, an error code on failure.
2642 */
__memcg_kmem_charge_page(struct page * page,gfp_t gfp,int order)2643 int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order)
2644 {
2645 struct obj_cgroup *objcg;
2646 int ret = 0;
2647
2648 objcg = current_obj_cgroup();
2649 if (objcg) {
2650 ret = obj_cgroup_charge_pages(objcg, gfp, 1 << order);
2651 if (!ret) {
2652 obj_cgroup_get(objcg);
2653 page->memcg_data = (unsigned long)objcg |
2654 MEMCG_DATA_KMEM;
2655 return 0;
2656 }
2657 }
2658 return ret;
2659 }
2660
2661 /**
2662 * __memcg_kmem_uncharge_page: uncharge a kmem page
2663 * @page: page to uncharge
2664 * @order: allocation order
2665 */
__memcg_kmem_uncharge_page(struct page * page,int order)2666 void __memcg_kmem_uncharge_page(struct page *page, int order)
2667 {
2668 struct folio *folio = page_folio(page);
2669 struct obj_cgroup *objcg;
2670 unsigned int nr_pages = 1 << order;
2671
2672 if (!folio_memcg_kmem(folio))
2673 return;
2674
2675 objcg = __folio_objcg(folio);
2676 obj_cgroup_uncharge_pages(objcg, nr_pages);
2677 folio->memcg_data = 0;
2678 obj_cgroup_put(objcg);
2679 }
2680
mod_objcg_state(struct obj_cgroup * objcg,struct pglist_data * pgdat,enum node_stat_item idx,int nr)2681 static void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat,
2682 enum node_stat_item idx, int nr)
2683 {
2684 struct memcg_stock_pcp *stock;
2685 struct obj_cgroup *old = NULL;
2686 unsigned long flags;
2687 int *bytes;
2688
2689 local_lock_irqsave(&memcg_stock.stock_lock, flags);
2690 stock = this_cpu_ptr(&memcg_stock);
2691
2692 /*
2693 * Save vmstat data in stock and skip vmstat array update unless
2694 * accumulating over a page of vmstat data or when pgdat or idx
2695 * changes.
2696 */
2697 if (READ_ONCE(stock->cached_objcg) != objcg) {
2698 old = drain_obj_stock(stock);
2699 obj_cgroup_get(objcg);
2700 stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes)
2701 ? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0;
2702 WRITE_ONCE(stock->cached_objcg, objcg);
2703 stock->cached_pgdat = pgdat;
2704 } else if (stock->cached_pgdat != pgdat) {
2705 /* Flush the existing cached vmstat data */
2706 struct pglist_data *oldpg = stock->cached_pgdat;
2707
2708 if (stock->nr_slab_reclaimable_b) {
2709 __mod_objcg_mlstate(objcg, oldpg, NR_SLAB_RECLAIMABLE_B,
2710 stock->nr_slab_reclaimable_b);
2711 stock->nr_slab_reclaimable_b = 0;
2712 }
2713 if (stock->nr_slab_unreclaimable_b) {
2714 __mod_objcg_mlstate(objcg, oldpg, NR_SLAB_UNRECLAIMABLE_B,
2715 stock->nr_slab_unreclaimable_b);
2716 stock->nr_slab_unreclaimable_b = 0;
2717 }
2718 stock->cached_pgdat = pgdat;
2719 }
2720
2721 bytes = (idx == NR_SLAB_RECLAIMABLE_B) ? &stock->nr_slab_reclaimable_b
2722 : &stock->nr_slab_unreclaimable_b;
2723 /*
2724 * Even for large objects >= PAGE_SIZE, the vmstat data will still be
2725 * cached locally at least once before pushing it out.
2726 */
2727 if (!*bytes) {
2728 *bytes = nr;
2729 nr = 0;
2730 } else {
2731 *bytes += nr;
2732 if (abs(*bytes) > PAGE_SIZE) {
2733 nr = *bytes;
2734 *bytes = 0;
2735 } else {
2736 nr = 0;
2737 }
2738 }
2739 if (nr)
2740 __mod_objcg_mlstate(objcg, pgdat, idx, nr);
2741
2742 local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
2743 obj_cgroup_put(old);
2744 }
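/*
 * Illustrative note (editorial addition): mod_objcg_state() batches slab
 * vmstat deltas for the same objcg and pgdat in the per-CPU
 * nr_slab_{reclaimable,unreclaimable}_b counters above, folding them into the
 * lruvec counters only once the pending delta exceeds PAGE_SIZE bytes or the
 * cached objcg/pgdat changes, trading a little accuracy for fewer
 * __mod_objcg_mlstate() calls.
 */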
2745
consume_obj_stock(struct obj_cgroup * objcg,unsigned int nr_bytes)2746 static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes)
2747 {
2748 struct memcg_stock_pcp *stock;
2749 unsigned long flags;
2750 bool ret = false;
2751
2752 local_lock_irqsave(&memcg_stock.stock_lock, flags);
2753
2754 stock = this_cpu_ptr(&memcg_stock);
2755 if (objcg == READ_ONCE(stock->cached_objcg) && stock->nr_bytes >= nr_bytes) {
2756 stock->nr_bytes -= nr_bytes;
2757 ret = true;
2758 }
2759
2760 local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
2761
2762 return ret;
2763 }
2764
drain_obj_stock(struct memcg_stock_pcp * stock)2765 static struct obj_cgroup *drain_obj_stock(struct memcg_stock_pcp *stock)
2766 {
2767 struct obj_cgroup *old = READ_ONCE(stock->cached_objcg);
2768
2769 if (!old)
2770 return NULL;
2771
2772 if (stock->nr_bytes) {
2773 unsigned int nr_pages = stock->nr_bytes >> PAGE_SHIFT;
2774 unsigned int nr_bytes = stock->nr_bytes & (PAGE_SIZE - 1);
2775
2776 if (nr_pages) {
2777 struct mem_cgroup *memcg;
2778
2779 memcg = get_mem_cgroup_from_objcg(old);
2780
2781 mod_memcg_state(memcg, MEMCG_KMEM, -nr_pages);
2782 memcg1_account_kmem(memcg, -nr_pages);
2783 __refill_stock(memcg, nr_pages);
2784
2785 css_put(&memcg->css);
2786 }
2787
2788 /*
2789 * The leftover is flushed to the centralized per-memcg value.
2790 * On the next attempt to refill obj stock it will be moved
2791 * to a per-cpu stock (probably on another CPU), see
2792 * refill_obj_stock().
2793 *
2794 * How often it's flushed is a trade-off between the memory
2795 * limit enforcement accuracy and potential CPU contention,
2796 * so it might be changed in the future.
2797 */
2798 atomic_add(nr_bytes, &old->nr_charged_bytes);
2799 stock->nr_bytes = 0;
2800 }
2801
2802 /*
2803 * Flush the vmstat data in current stock
2804 */
2805 if (stock->nr_slab_reclaimable_b || stock->nr_slab_unreclaimable_b) {
2806 if (stock->nr_slab_reclaimable_b) {
2807 __mod_objcg_mlstate(old, stock->cached_pgdat,
2808 NR_SLAB_RECLAIMABLE_B,
2809 stock->nr_slab_reclaimable_b);
2810 stock->nr_slab_reclaimable_b = 0;
2811 }
2812 if (stock->nr_slab_unreclaimable_b) {
2813 __mod_objcg_mlstate(old, stock->cached_pgdat,
2814 NR_SLAB_UNRECLAIMABLE_B,
2815 stock->nr_slab_unreclaimable_b);
2816 stock->nr_slab_unreclaimable_b = 0;
2817 }
2818 stock->cached_pgdat = NULL;
2819 }
2820
2821 WRITE_ONCE(stock->cached_objcg, NULL);
2822 /*
2823 * The `old' objcg needs to be released by the caller via
2824 * obj_cgroup_put() outside of memcg_stock_pcp::stock_lock.
2825 */
2826 return old;
2827 }
2828
obj_stock_flush_required(struct memcg_stock_pcp * stock,struct mem_cgroup * root_memcg)2829 static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
2830 struct mem_cgroup *root_memcg)
2831 {
2832 struct obj_cgroup *objcg = READ_ONCE(stock->cached_objcg);
2833 struct mem_cgroup *memcg;
2834
2835 if (objcg) {
2836 memcg = obj_cgroup_memcg(objcg);
2837 if (memcg && mem_cgroup_is_descendant(memcg, root_memcg))
2838 return true;
2839 }
2840
2841 return false;
2842 }
2843
refill_obj_stock(struct obj_cgroup * objcg,unsigned int nr_bytes,bool allow_uncharge)2844 static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes,
2845 bool allow_uncharge)
2846 {
2847 struct memcg_stock_pcp *stock;
2848 struct obj_cgroup *old = NULL;
2849 unsigned long flags;
2850 unsigned int nr_pages = 0;
2851
2852 local_lock_irqsave(&memcg_stock.stock_lock, flags);
2853
2854 stock = this_cpu_ptr(&memcg_stock);
2855 if (READ_ONCE(stock->cached_objcg) != objcg) { /* reset if necessary */
2856 old = drain_obj_stock(stock);
2857 obj_cgroup_get(objcg);
2858 WRITE_ONCE(stock->cached_objcg, objcg);
2859 stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes)
2860 ? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0;
2861 allow_uncharge = true; /* Allow uncharge when objcg changes */
2862 }
2863 stock->nr_bytes += nr_bytes;
2864
2865 if (allow_uncharge && (stock->nr_bytes > PAGE_SIZE)) {
2866 nr_pages = stock->nr_bytes >> PAGE_SHIFT;
2867 stock->nr_bytes &= (PAGE_SIZE - 1);
2868 }
2869
2870 local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
2871 obj_cgroup_put(old);
2872
2873 if (nr_pages)
2874 obj_cgroup_uncharge_pages(objcg, nr_pages);
2875 }
2876
obj_cgroup_charge(struct obj_cgroup * objcg,gfp_t gfp,size_t size)2877 int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size)
2878 {
2879 unsigned int nr_pages, nr_bytes;
2880 int ret;
2881
2882 if (consume_obj_stock(objcg, size))
2883 return 0;
2884
2885 /*
2886 * In theory, objcg->nr_charged_bytes can have enough
2887 * pre-charged bytes to satisfy the allocation. However,
2888 * flushing objcg->nr_charged_bytes requires two atomic
2889 * operations, and objcg->nr_charged_bytes can't be big.
2890 * The shared objcg->nr_charged_bytes can also become a
2891 * performance bottleneck if all tasks of the same memcg are
2892 * trying to update it. So it's better to ignore it and try
2893 * grab some new pages. The stock's nr_bytes will be flushed to
2894 * objcg->nr_charged_bytes later on when objcg changes.
2895 *
2896 * The stock's nr_bytes may contain enough pre-charged bytes
2897 * to allow one less page to be charged, but we can't rely
2898 * on the pre-charged bytes not being changed outside of
2899 * consume_obj_stock() or refill_obj_stock(). So ignore those
2900 * pre-charged bytes as well when charging pages. To avoid a
2901 * page uncharge right after a page charge, we set the
2902 * allow_uncharge flag to false when calling refill_obj_stock()
2903 * to temporarily allow the pre-charged bytes to exceed the page
2904 * size limit. The maximum reachable value of the pre-charged
2905 * bytes is (sizeof(object) + PAGE_SIZE - 2) if there is no data
2906 * race.
2907 */
2908 nr_pages = size >> PAGE_SHIFT;
2909 nr_bytes = size & (PAGE_SIZE - 1);
2910
2911 if (nr_bytes)
2912 nr_pages += 1;
2913
2914 ret = obj_cgroup_charge_pages(objcg, gfp, nr_pages);
2915 if (!ret && nr_bytes)
2916 refill_obj_stock(objcg, PAGE_SIZE - nr_bytes, false);
2917
2918 return ret;
2919 }
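/*
 * Worked example (editorial addition): charging a 700-byte object with an
 * empty obj stock computes nr_pages = 1 and nr_bytes = 700, charges one full
 * page via obj_cgroup_charge_pages(), and stashes the unused PAGE_SIZE - 700
 * bytes in the per-CPU obj stock with allow_uncharge == false. Subsequent
 * small charges from the same objcg are then satisfied byte by byte from
 * that stock by consume_obj_stock().
 */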
2920
obj_cgroup_uncharge(struct obj_cgroup * objcg,size_t size)2921 void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size)
2922 {
2923 refill_obj_stock(objcg, size, true);
2924 }
2925
obj_full_size(struct kmem_cache * s)2926 static inline size_t obj_full_size(struct kmem_cache *s)
2927 {
2928 /*
2929 * For each accounted object there is extra space used to store its
2930 * obj_cgroup membership. Charge it too.
2931 */
2932 return s->size + sizeof(struct obj_cgroup *);
2933 }
2934
__memcg_slab_post_alloc_hook(struct kmem_cache * s,struct list_lru * lru,gfp_t flags,size_t size,void ** p)2935 bool __memcg_slab_post_alloc_hook(struct kmem_cache *s, struct list_lru *lru,
2936 gfp_t flags, size_t size, void **p)
2937 {
2938 struct obj_cgroup *objcg;
2939 struct slab *slab;
2940 unsigned long off;
2941 size_t i;
2942
2943 /*
2944 * The obtained objcg pointer is safe to use within the current scope,
2945 * defined by current task or set_active_memcg() pair.
2946 * obj_cgroup_get() is used to get a permanent reference.
2947 */
2948 objcg = current_obj_cgroup();
2949 if (!objcg)
2950 return true;
2951
2952 /*
2953 * slab_alloc_node() avoids the NULL check, so we might be called with a
2954 * single NULL object. kmem_cache_alloc_bulk() aborts if it can't fill
2955 * the whole requested size.
2956 * Return success as there's nothing to free back.
2957 */
2958 if (unlikely(*p == NULL))
2959 return true;
2960
2961 flags &= gfp_allowed_mask;
2962
2963 if (lru) {
2964 int ret;
2965 struct mem_cgroup *memcg;
2966
2967 memcg = get_mem_cgroup_from_objcg(objcg);
2968 ret = memcg_list_lru_alloc(memcg, lru, flags);
2969 css_put(&memcg->css);
2970
2971 if (ret)
2972 return false;
2973 }
2974
2975 if (obj_cgroup_charge(objcg, flags, size * obj_full_size(s)))
2976 return false;
2977
2978 for (i = 0; i < size; i++) {
2979 slab = virt_to_slab(p[i]);
2980
2981 if (!slab_obj_exts(slab) &&
2982 alloc_slab_obj_exts(slab, s, flags, false)) {
2983 obj_cgroup_uncharge(objcg, obj_full_size(s));
2984 continue;
2985 }
2986
2987 off = obj_to_index(s, slab, p[i]);
2988 obj_cgroup_get(objcg);
2989 slab_obj_exts(slab)[off].objcg = objcg;
2990 mod_objcg_state(objcg, slab_pgdat(slab),
2991 cache_vmstat_idx(s), obj_full_size(s));
2992 }
2993
2994 return true;
2995 }
2996
__memcg_slab_free_hook(struct kmem_cache * s,struct slab * slab,void ** p,int objects,struct slabobj_ext * obj_exts)2997 void __memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
2998 void **p, int objects, struct slabobj_ext *obj_exts)
2999 {
3000 for (int i = 0; i < objects; i++) {
3001 struct obj_cgroup *objcg;
3002 unsigned int off;
3003
3004 off = obj_to_index(s, slab, p[i]);
3005 objcg = obj_exts[off].objcg;
3006 if (!objcg)
3007 continue;
3008
3009 obj_exts[off].objcg = NULL;
3010 obj_cgroup_uncharge(objcg, obj_full_size(s));
3011 mod_objcg_state(objcg, slab_pgdat(slab), cache_vmstat_idx(s),
3012 -obj_full_size(s));
3013 obj_cgroup_put(objcg);
3014 }
3015 }
3016
3017 /*
3018 * Because folio_memcg(head) is not set on tails, set it now.
3019 */
split_page_memcg(struct page * head,int old_order,int new_order)3020 void split_page_memcg(struct page *head, int old_order, int new_order)
3021 {
3022 struct folio *folio = page_folio(head);
3023 int i;
3024 unsigned int old_nr = 1 << old_order;
3025 unsigned int new_nr = 1 << new_order;
3026
3027 if (mem_cgroup_disabled() || !folio_memcg_charged(folio))
3028 return;
3029
3030 for (i = new_nr; i < old_nr; i += new_nr)
3031 folio_page(folio, i)->memcg_data = folio->memcg_data;
3032
3033 if (folio_memcg_kmem(folio))
3034 obj_cgroup_get_many(__folio_objcg(folio), old_nr / new_nr - 1);
3035 else
3036 css_get_many(&folio_memcg(folio)->css, old_nr / new_nr - 1);
3037 }
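/*
 * Illustrative note (editorial addition): splitting an order-9 folio into
 * order-0 pages (old_nr = 512, new_nr = 1) copies memcg_data to the 511 tail
 * pages and takes 511 extra objcg or css references, so each resulting page
 * can later be uncharged independently.
 */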
3038
mem_cgroup_usage(struct mem_cgroup * memcg,bool swap)3039 unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
3040 {
3041 unsigned long val;
3042
3043 if (mem_cgroup_is_root(memcg)) {
3044 /*
3045 * Approximate root's usage from global state. This isn't
3046 * perfect, but the root usage was always an approximation.
3047 */
3048 val = global_node_page_state(NR_FILE_PAGES) +
3049 global_node_page_state(NR_ANON_MAPPED);
3050 if (swap)
3051 val += total_swap_pages - get_nr_swap_pages();
3052 } else {
3053 if (!swap)
3054 val = page_counter_read(&memcg->memory);
3055 else
3056 val = page_counter_read(&memcg->memsw);
3057 }
3058 return val;
3059 }
3060
memcg_online_kmem(struct mem_cgroup * memcg)3061 static int memcg_online_kmem(struct mem_cgroup *memcg)
3062 {
3063 struct obj_cgroup *objcg;
3064
3065 if (mem_cgroup_kmem_disabled())
3066 return 0;
3067
3068 if (unlikely(mem_cgroup_is_root(memcg)))
3069 return 0;
3070
3071 objcg = obj_cgroup_alloc();
3072 if (!objcg)
3073 return -ENOMEM;
3074
3075 objcg->memcg = memcg;
3076 rcu_assign_pointer(memcg->objcg, objcg);
3077 obj_cgroup_get(objcg);
3078 memcg->orig_objcg = objcg;
3079
3080 static_branch_enable(&memcg_kmem_online_key);
3081
3082 memcg->kmemcg_id = memcg->id.id;
3083
3084 return 0;
3085 }
3086
memcg_offline_kmem(struct mem_cgroup * memcg)3087 static void memcg_offline_kmem(struct mem_cgroup *memcg)
3088 {
3089 struct mem_cgroup *parent;
3090
3091 if (mem_cgroup_kmem_disabled())
3092 return;
3093
3094 if (unlikely(mem_cgroup_is_root(memcg)))
3095 return;
3096
3097 parent = parent_mem_cgroup(memcg);
3098 if (!parent)
3099 parent = root_mem_cgroup;
3100
3101 memcg_reparent_objcgs(memcg, parent);
3102
3103 /*
3104 * After we have finished memcg_reparent_objcgs(), all list_lrus
3105 * corresponding to this cgroup are guaranteed to remain empty.
3106 * The ordering is imposed by list_lru_node->lock taken by
3107 * memcg_reparent_list_lrus().
3108 */
3109 memcg_reparent_list_lrus(memcg, parent);
3110 }
3111
3112 #ifdef CONFIG_CGROUP_WRITEBACK
3113
3114 #include <trace/events/writeback.h>
3115
memcg_wb_domain_init(struct mem_cgroup * memcg,gfp_t gfp)3116 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
3117 {
3118 return wb_domain_init(&memcg->cgwb_domain, gfp);
3119 }
3120
memcg_wb_domain_exit(struct mem_cgroup * memcg)3121 static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
3122 {
3123 wb_domain_exit(&memcg->cgwb_domain);
3124 }
3125
memcg_wb_domain_size_changed(struct mem_cgroup * memcg)3126 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
3127 {
3128 wb_domain_size_changed(&memcg->cgwb_domain);
3129 }
3130
mem_cgroup_wb_domain(struct bdi_writeback * wb)3131 struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
3132 {
3133 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
3134
3135 if (!memcg->css.parent)
3136 return NULL;
3137
3138 return &memcg->cgwb_domain;
3139 }
3140
3141 /**
3142 * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg
3143 * @wb: bdi_writeback in question
3144 * @pfilepages: out parameter for number of file pages
3145 * @pheadroom: out parameter for number of allocatable pages according to memcg
3146 * @pdirty: out parameter for number of dirty pages
3147 * @pwriteback: out parameter for number of pages under writeback
3148 *
3149 * Determine the numbers of file, headroom, dirty, and writeback pages in
3150 * @wb's memcg. File, dirty and writeback are self-explanatory. Headroom
3151 * is a bit more involved.
3152 *
3153 * A memcg's headroom is "min(max, high) - used". In the hierarchy, the
3154 * headroom is calculated as the lowest headroom of itself and the
3155 * ancestors. Note that this doesn't consider the actual amount of
3156 * available memory in the system. The caller should further cap
3157 * *@pheadroom accordingly.
3158 */
mem_cgroup_wb_stats(struct bdi_writeback * wb,unsigned long * pfilepages,unsigned long * pheadroom,unsigned long * pdirty,unsigned long * pwriteback)3159 void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
3160 unsigned long *pheadroom, unsigned long *pdirty,
3161 unsigned long *pwriteback)
3162 {
3163 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
3164 struct mem_cgroup *parent;
3165
3166 mem_cgroup_flush_stats_ratelimited(memcg);
3167
3168 *pdirty = memcg_page_state(memcg, NR_FILE_DIRTY);
3169 *pwriteback = memcg_page_state(memcg, NR_WRITEBACK);
3170 *pfilepages = memcg_page_state(memcg, NR_INACTIVE_FILE) +
3171 memcg_page_state(memcg, NR_ACTIVE_FILE);
3172
3173 *pheadroom = PAGE_COUNTER_MAX;
3174 while ((parent = parent_mem_cgroup(memcg))) {
3175 unsigned long ceiling = min(READ_ONCE(memcg->memory.max),
3176 READ_ONCE(memcg->memory.high));
3177 unsigned long used = page_counter_read(&memcg->memory);
3178
3179 *pheadroom = min(*pheadroom, ceiling - min(ceiling, used));
3180 memcg = parent;
3181 }
3182 }
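/*
 * Worked example (editorial addition): for a memcg with memory.max = 200M,
 * memory.high = 150M and 120M in use, its own headroom is
 * min(200M, 150M) - 120M = 30M. If an ancestor only has 10M of headroom,
 * *pheadroom ends up as 10M, because the loop above takes the minimum over
 * the memcg and all of its ancestors.
 */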
3183
3184 /*
3185 * Foreign dirty flushing
3186 *
3187 * There's an inherent mismatch between memcg and writeback. The former
3188 * tracks ownership per-page while the latter per-inode. This was a
3189 * deliberate design decision because honoring per-page ownership in the
3190 * writeback path is complicated, may lead to higher CPU and IO overheads
3191 * and deemed unnecessary given that write-sharing an inode across
3192 * different cgroups isn't a common use-case.
3193 *
3194 * Combined with inode majority-writer ownership switching, this works well
3195 * enough in most cases but there are some pathological cases. For
3196 * example, let's say there are two cgroups A and B which keep writing to
3197 * different but confined parts of the same inode. B owns the inode and
3198 * A's memory is limited far below B's. A's dirty ratio can rise enough to
3199 * trigger balance_dirty_pages() sleeps but B's can be low enough to avoid
3200 * triggering background writeback. A will be slowed down without a way to
3201 * make writeback of the dirty pages happen.
3202 *
3203 * Conditions like the above can lead to a cgroup getting repeatedly and
3204 * severely throttled after making some progress after each
3205 * dirty_expire_interval while the underlying IO device is almost
3206 * completely idle.
3207 *
3208 * Solving this problem completely requires matching the ownership tracking
3209 * granularities between memcg and writeback in either direction. However,
3210 * the more egregious behaviors can be avoided by simply remembering the
3211 * most recent foreign dirtying events and initiating remote flushes on
3212 * them when local writeback isn't enough to keep the memory clean enough.
3213 *
3214 * The following two functions implement such mechanism. When a foreign
3215 * page - a page whose memcg and writeback ownerships don't match - is
3216 * dirtied, mem_cgroup_track_foreign_dirty() records the inode-owning
3217 * bdi_writeback on the memcg that owns the page. When balance_dirty_pages()
3218 * decides that the memcg needs to sleep due to high dirty ratio, it calls
3219 * mem_cgroup_flush_foreign() which queues writeback on the recorded
3220 * foreign bdi_writebacks which haven't expired. Both the numbers of
3221 * recorded bdi_writebacks and concurrent in-flight foreign writebacks are
3222 * limited to MEMCG_CGWB_FRN_CNT.
3223 *
3224 * The mechanism only remembers IDs and doesn't hold any object references.
3225 * As being wrong occasionally doesn't matter, updates and accesses to the
3226 * records are lockless and racy.
3227 */
mem_cgroup_track_foreign_dirty_slowpath(struct folio * folio,struct bdi_writeback * wb)3228 void mem_cgroup_track_foreign_dirty_slowpath(struct folio *folio,
3229 struct bdi_writeback *wb)
3230 {
3231 struct mem_cgroup *memcg = folio_memcg(folio);
3232 struct memcg_cgwb_frn *frn;
3233 u64 now = get_jiffies_64();
3234 u64 oldest_at = now;
3235 int oldest = -1;
3236 int i;
3237
3238 trace_track_foreign_dirty(folio, wb);
3239
3240 /*
3241 * Pick the slot to use. If there is already a slot for @wb, keep
3242 * using it. If not, replace the oldest one which isn't being
3243 * written out.
3244 */
3245 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) {
3246 frn = &memcg->cgwb_frn[i];
3247 if (frn->bdi_id == wb->bdi->id &&
3248 frn->memcg_id == wb->memcg_css->id)
3249 break;
3250 if (time_before64(frn->at, oldest_at) &&
3251 atomic_read(&frn->done.cnt) == 1) {
3252 oldest = i;
3253 oldest_at = frn->at;
3254 }
3255 }
3256
3257 if (i < MEMCG_CGWB_FRN_CNT) {
3258 /*
3259 * Re-using an existing one. Update timestamp lazily to
3260 * avoid making the cacheline hot. We want them to be
3261 * reasonably up-to-date and significantly shorter than
3262 * dirty_expire_interval as that's what expires the record.
3263 * Use the shorter of 1s and dirty_expire_interval / 8.
3264 */
3265 unsigned long update_intv =
3266 min_t(unsigned long, HZ,
3267 msecs_to_jiffies(dirty_expire_interval * 10) / 8);
3268
3269 if (time_before64(frn->at, now - update_intv))
3270 frn->at = now;
3271 } else if (oldest >= 0) {
3272 /* replace the oldest free one */
3273 frn = &memcg->cgwb_frn[oldest];
3274 frn->bdi_id = wb->bdi->id;
3275 frn->memcg_id = wb->memcg_css->id;
3276 frn->at = now;
3277 }
3278 }
3279
3280 /* issue foreign writeback flushes for recorded foreign dirtying events */
mem_cgroup_flush_foreign(struct bdi_writeback * wb)3281 void mem_cgroup_flush_foreign(struct bdi_writeback *wb)
3282 {
3283 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
3284 unsigned long intv = msecs_to_jiffies(dirty_expire_interval * 10);
3285 u64 now = jiffies_64;
3286 int i;
3287
3288 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) {
3289 struct memcg_cgwb_frn *frn = &memcg->cgwb_frn[i];
3290
3291 /*
3292 * If the record is older than dirty_expire_interval,
3293 * writeback on it has already started. No need to kick it
3294 * off again. Also, don't start a new one if there's
3295 * already one in flight.
3296 */
3297 if (time_after64(frn->at, now - intv) &&
3298 atomic_read(&frn->done.cnt) == 1) {
3299 frn->at = 0;
3300 trace_flush_foreign(wb, frn->bdi_id, frn->memcg_id);
3301 cgroup_writeback_by_id(frn->bdi_id, frn->memcg_id,
3302 WB_REASON_FOREIGN_FLUSH,
3303 &frn->done);
3304 }
3305 }
3306 }
3307
3308 #else /* CONFIG_CGROUP_WRITEBACK */
3309
memcg_wb_domain_init(struct mem_cgroup * memcg,gfp_t gfp)3310 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
3311 {
3312 return 0;
3313 }
3314
memcg_wb_domain_exit(struct mem_cgroup * memcg)3315 static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
3316 {
3317 }
3318
memcg_wb_domain_size_changed(struct mem_cgroup * memcg)3319 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
3320 {
3321 }
3322
3323 #endif /* CONFIG_CGROUP_WRITEBACK */
3324
3325 /*
3326 * Private memory cgroup IDR
3327 *
3328 * Swap-out records and page cache shadow entries need to store memcg
3329 * references in constrained space, so we maintain an ID space that is
3330 * limited to 16 bit (MEM_CGROUP_ID_MAX), limiting the total number of
3331 * memory-controlled cgroups to 64k.
3332 *
3333 * However, there usually are many references to the offline CSS after
3334 * the cgroup has been destroyed, such as page cache or reclaimable
3335 * slab objects, that don't need to hang on to the ID. We want to keep
3336 * those dead CSS from occupying IDs, or we might quickly exhaust the
3337 * relatively small ID space and prevent the creation of new cgroups
3338 * even when there are much fewer than 64k cgroups - possibly none.
3339 *
3340 * Maintain a private 16-bit ID space for memcg, and allow the ID to
3341 * be freed and recycled when it's no longer needed, which is usually
3342 * when the CSS is offlined.
3343 *
3344 * The only exception to that are records of swapped out tmpfs/shmem
3345 * pages that need to be attributed to live ancestors on swapin. But
3346 * those references are manageable from userspace.
3347 */
3348
3349 #define MEM_CGROUP_ID_MAX ((1UL << MEM_CGROUP_ID_SHIFT) - 1)
3350 static DEFINE_XARRAY_ALLOC1(mem_cgroup_ids);
3351
mem_cgroup_id_remove(struct mem_cgroup * memcg)3352 static void mem_cgroup_id_remove(struct mem_cgroup *memcg)
3353 {
3354 if (memcg->id.id > 0) {
3355 xa_erase(&mem_cgroup_ids, memcg->id.id);
3356 memcg->id.id = 0;
3357 }
3358 }
3359
mem_cgroup_id_get_many(struct mem_cgroup * memcg,unsigned int n)3360 void __maybe_unused mem_cgroup_id_get_many(struct mem_cgroup *memcg,
3361 unsigned int n)
3362 {
3363 refcount_add(n, &memcg->id.ref);
3364 }
3365
mem_cgroup_id_put_many(struct mem_cgroup * memcg,unsigned int n)3366 void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n)
3367 {
3368 if (refcount_sub_and_test(n, &memcg->id.ref)) {
3369 mem_cgroup_id_remove(memcg);
3370
3371 /* Memcg ID pins CSS */
3372 css_put(&memcg->css);
3373 }
3374 }
3375
mem_cgroup_id_put(struct mem_cgroup * memcg)3376 static inline void mem_cgroup_id_put(struct mem_cgroup *memcg)
3377 {
3378 mem_cgroup_id_put_many(memcg, 1);
3379 }
3380
3381 /**
3382 * mem_cgroup_from_id - look up a memcg from a memcg id
3383 * @id: the memcg id to look up
3384 *
3385 * Caller must hold rcu_read_lock().
3386 */
mem_cgroup_from_id(unsigned short id)3387 struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
3388 {
3389 WARN_ON_ONCE(!rcu_read_lock_held());
3390 return xa_load(&mem_cgroup_ids, id);
3391 }
3392
3393 #ifdef CONFIG_SHRINKER_DEBUG
mem_cgroup_get_from_ino(unsigned long ino)3394 struct mem_cgroup *mem_cgroup_get_from_ino(unsigned long ino)
3395 {
3396 struct cgroup *cgrp;
3397 struct cgroup_subsys_state *css;
3398 struct mem_cgroup *memcg;
3399
3400 cgrp = cgroup_get_from_id(ino);
3401 if (IS_ERR(cgrp))
3402 return ERR_CAST(cgrp);
3403
3404 css = cgroup_get_e_css(cgrp, &memory_cgrp_subsys);
3405 if (css)
3406 memcg = container_of(css, struct mem_cgroup, css);
3407 else
3408 memcg = ERR_PTR(-ENOENT);
3409
3410 cgroup_put(cgrp);
3411
3412 return memcg;
3413 }
3414 #endif
3415
alloc_mem_cgroup_per_node_info(struct mem_cgroup * memcg,int node)3416 static bool alloc_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
3417 {
3418 struct mem_cgroup_per_node *pn;
3419
3420 pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, node);
3421 if (!pn)
3422 return false;
3423
3424 pn->lruvec_stats = kzalloc_node(sizeof(struct lruvec_stats),
3425 GFP_KERNEL_ACCOUNT, node);
3426 if (!pn->lruvec_stats)
3427 goto fail;
3428
3429 pn->lruvec_stats_percpu = alloc_percpu_gfp(struct lruvec_stats_percpu,
3430 GFP_KERNEL_ACCOUNT);
3431 if (!pn->lruvec_stats_percpu)
3432 goto fail;
3433
3434 lruvec_init(&pn->lruvec);
3435 pn->memcg = memcg;
3436
3437 memcg->nodeinfo[node] = pn;
3438 return true;
3439 fail:
3440 kfree(pn->lruvec_stats);
3441 kfree(pn);
3442 return false;
3443 }
3444
free_mem_cgroup_per_node_info(struct mem_cgroup * memcg,int node)3445 static void free_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
3446 {
3447 struct mem_cgroup_per_node *pn = memcg->nodeinfo[node];
3448
3449 if (!pn)
3450 return;
3451
3452 free_percpu(pn->lruvec_stats_percpu);
3453 kfree(pn->lruvec_stats);
3454 kfree(pn);
3455 }
3456
__mem_cgroup_free(struct mem_cgroup * memcg)3457 static void __mem_cgroup_free(struct mem_cgroup *memcg)
3458 {
3459 int node;
3460
3461 obj_cgroup_put(memcg->orig_objcg);
3462
3463 for_each_node(node)
3464 free_mem_cgroup_per_node_info(memcg, node);
3465 memcg1_free_events(memcg);
3466 kfree(memcg->vmstats);
3467 free_percpu(memcg->vmstats_percpu);
3468 kfree(memcg);
3469 }
3470
mem_cgroup_free(struct mem_cgroup * memcg)3471 static void mem_cgroup_free(struct mem_cgroup *memcg)
3472 {
3473 lru_gen_exit_memcg(memcg);
3474 memcg_wb_domain_exit(memcg);
3475 __mem_cgroup_free(memcg);
3476 }
3477
mem_cgroup_alloc(struct mem_cgroup * parent)3478 static struct mem_cgroup *mem_cgroup_alloc(struct mem_cgroup *parent)
3479 {
3480 struct memcg_vmstats_percpu *statc, *pstatc;
3481 struct mem_cgroup *memcg;
3482 int node, cpu;
3483 int __maybe_unused i;
3484 long error;
3485
3486 memcg = kzalloc(struct_size(memcg, nodeinfo, nr_node_ids), GFP_KERNEL);
3487 if (!memcg)
3488 return ERR_PTR(-ENOMEM);
3489
3490 error = xa_alloc(&mem_cgroup_ids, &memcg->id.id, NULL,
3491 XA_LIMIT(1, MEM_CGROUP_ID_MAX), GFP_KERNEL);
3492 if (error)
3493 goto fail;
3494 error = -ENOMEM;
3495
3496 memcg->vmstats = kzalloc(sizeof(struct memcg_vmstats),
3497 GFP_KERNEL_ACCOUNT);
3498 if (!memcg->vmstats)
3499 goto fail;
3500
3501 memcg->vmstats_percpu = alloc_percpu_gfp(struct memcg_vmstats_percpu,
3502 GFP_KERNEL_ACCOUNT);
3503 if (!memcg->vmstats_percpu)
3504 goto fail;
3505
3506 if (!memcg1_alloc_events(memcg))
3507 goto fail;
3508
3509 for_each_possible_cpu(cpu) {
3510 if (parent)
3511 pstatc = per_cpu_ptr(parent->vmstats_percpu, cpu);
3512 statc = per_cpu_ptr(memcg->vmstats_percpu, cpu);
3513 statc->parent = parent ? pstatc : NULL;
3514 statc->vmstats = memcg->vmstats;
3515 }
3516
3517 for_each_node(node)
3518 if (!alloc_mem_cgroup_per_node_info(memcg, node))
3519 goto fail;
3520
3521 if (memcg_wb_domain_init(memcg, GFP_KERNEL))
3522 goto fail;
3523
3524 INIT_WORK(&memcg->high_work, high_work_func);
3525 vmpressure_init(&memcg->vmpressure);
3526 INIT_LIST_HEAD(&memcg->memory_peaks);
3527 INIT_LIST_HEAD(&memcg->swap_peaks);
3528 spin_lock_init(&memcg->peaks_lock);
3529 memcg->socket_pressure = jiffies;
3530 memcg1_memcg_init(memcg);
3531 memcg->kmemcg_id = -1;
3532 INIT_LIST_HEAD(&memcg->objcg_list);
3533 #ifdef CONFIG_CGROUP_WRITEBACK
3534 INIT_LIST_HEAD(&memcg->cgwb_list);
3535 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++)
3536 memcg->cgwb_frn[i].done =
3537 __WB_COMPLETION_INIT(&memcg_cgwb_frn_waitq);
3538 #endif
3539 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
3540 spin_lock_init(&memcg->deferred_split_queue.split_queue_lock);
3541 INIT_LIST_HEAD(&memcg->deferred_split_queue.split_queue);
3542 memcg->deferred_split_queue.split_queue_len = 0;
3543 #endif
3544 lru_gen_init_memcg(memcg);
3545 return memcg;
3546 fail:
3547 mem_cgroup_id_remove(memcg);
3548 __mem_cgroup_free(memcg);
3549 return ERR_PTR(error);
3550 }
3551
3552 static struct cgroup_subsys_state * __ref
3553 mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
3554 {
3555 struct mem_cgroup *parent = mem_cgroup_from_css(parent_css);
3556 struct mem_cgroup *memcg, *old_memcg;
3557
3558 old_memcg = set_active_memcg(parent);
3559 memcg = mem_cgroup_alloc(parent);
3560 set_active_memcg(old_memcg);
3561 if (IS_ERR(memcg))
3562 return ERR_CAST(memcg);
3563
3564 page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX);
3565 memcg1_soft_limit_reset(memcg);
3566 #ifdef CONFIG_ZSWAP
3567 memcg->zswap_max = PAGE_COUNTER_MAX;
3568 WRITE_ONCE(memcg->zswap_writeback, true);
3569 #endif
3570 page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX);
3571 if (parent) {
3572 WRITE_ONCE(memcg->swappiness, mem_cgroup_swappiness(parent));
3573
3574 page_counter_init(&memcg->memory, &parent->memory, true);
3575 page_counter_init(&memcg->swap, &parent->swap, false);
3576 #ifdef CONFIG_MEMCG_V1
3577 WRITE_ONCE(memcg->oom_kill_disable, READ_ONCE(parent->oom_kill_disable));
3578 page_counter_init(&memcg->kmem, &parent->kmem, false);
3579 page_counter_init(&memcg->tcpmem, &parent->tcpmem, false);
3580 #endif
3581 } else {
3582 init_memcg_stats();
3583 init_memcg_events();
3584 page_counter_init(&memcg->memory, NULL, true);
3585 page_counter_init(&memcg->swap, NULL, false);
3586 #ifdef CONFIG_MEMCG_V1
3587 page_counter_init(&memcg->kmem, NULL, false);
3588 page_counter_init(&memcg->tcpmem, NULL, false);
3589 #endif
3590 root_mem_cgroup = memcg;
3591 return &memcg->css;
3592 }
3593
3594 if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
3595 static_branch_inc(&memcg_sockets_enabled_key);
3596
3597 if (!cgroup_memory_nobpf)
3598 static_branch_inc(&memcg_bpf_enabled_key);
3599
3600 return &memcg->css;
3601 }
3602
3603 static int mem_cgroup_css_online(struct cgroup_subsys_state *css)
3604 {
3605 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3606
3607 if (memcg_online_kmem(memcg))
3608 goto remove_id;
3609
3610 /*
3611 * A memcg must be visible for expand_shrinker_info()
3612 * by the time the maps are allocated. So, we allocate maps
3613 * here, when for_each_mem_cgroup() can't skip it.
3614 */
3615 if (alloc_shrinker_info(memcg))
3616 goto offline_kmem;
3617
3618 if (unlikely(mem_cgroup_is_root(memcg)) && !mem_cgroup_disabled())
3619 queue_delayed_work(system_unbound_wq, &stats_flush_dwork,
3620 FLUSH_TIME);
3621 lru_gen_online_memcg(memcg);
3622
3623 /* Online state pins memcg ID, memcg ID pins CSS */
3624 refcount_set(&memcg->id.ref, 1);
3625 css_get(css);
3626
3627 /*
3628 * Ensure mem_cgroup_from_id() works once we're fully online.
3629 *
3630 * We could do this earlier and require callers to filter with
3631 * css_tryget_online(). But right now there are no users that
3632 * need earlier access, and the workingset code relies on the
3633 * cgroup tree linkage (mem_cgroup_get_nr_swap_pages()). So
3634 * publish it here at the end of onlining. This matches the
3635 * regular ID destruction during offlining.
3636 */
3637 xa_store(&mem_cgroup_ids, memcg->id.id, memcg, GFP_KERNEL);
3638
3639 return 0;
3640 offline_kmem:
3641 memcg_offline_kmem(memcg);
3642 remove_id:
3643 mem_cgroup_id_remove(memcg);
3644 return -ENOMEM;
3645 }
3646
3647 static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
3648 {
3649 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3650
3651 memcg1_css_offline(memcg);
3652
3653 page_counter_set_min(&memcg->memory, 0);
3654 page_counter_set_low(&memcg->memory, 0);
3655
3656 zswap_memcg_offline_cleanup(memcg);
3657
3658 memcg_offline_kmem(memcg);
3659 reparent_shrinker_deferred(memcg);
3660 wb_memcg_offline(memcg);
3661 lru_gen_offline_memcg(memcg);
3662
3663 drain_all_stock(memcg);
3664
3665 mem_cgroup_id_put(memcg);
3666 }
3667
3668 static void mem_cgroup_css_released(struct cgroup_subsys_state *css)
3669 {
3670 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3671
3672 invalidate_reclaim_iterators(memcg);
3673 lru_gen_release_memcg(memcg);
3674 }
3675
3676 static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
3677 {
3678 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3679 int __maybe_unused i;
3680
3681 #ifdef CONFIG_CGROUP_WRITEBACK
3682 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++)
3683 wb_wait_for_completion(&memcg->cgwb_frn[i].done);
3684 #endif
3685 if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
3686 static_branch_dec(&memcg_sockets_enabled_key);
3687
3688 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg1_tcpmem_active(memcg))
3689 static_branch_dec(&memcg_sockets_enabled_key);
3690
3691 if (!cgroup_memory_nobpf)
3692 static_branch_dec(&memcg_bpf_enabled_key);
3693
3694 vmpressure_cleanup(&memcg->vmpressure);
3695 cancel_work_sync(&memcg->high_work);
3696 memcg1_remove_from_trees(memcg);
3697 free_shrinker_info(memcg);
3698 mem_cgroup_free(memcg);
3699 }
3700
3701 /**
3702 * mem_cgroup_css_reset - reset the states of a mem_cgroup
3703 * @css: the target css
3704 *
3705 * Reset the states of the mem_cgroup associated with @css. This is
3706 * invoked when the userland requests disabling on the default hierarchy
3707 * but the memcg is pinned through dependency. The memcg should stop
3708 * applying policies and should revert to the vanilla state as it may be
3709 * made visible again.
3710 *
3711 * The current implementation only resets the essential configurations.
3712 * This needs to be expanded to cover all the visible parts.
3713 */
3714 static void mem_cgroup_css_reset(struct cgroup_subsys_state *css)
3715 {
3716 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3717
3718 page_counter_set_max(&memcg->memory, PAGE_COUNTER_MAX);
3719 page_counter_set_max(&memcg->swap, PAGE_COUNTER_MAX);
3720 #ifdef CONFIG_MEMCG_V1
3721 page_counter_set_max(&memcg->kmem, PAGE_COUNTER_MAX);
3722 page_counter_set_max(&memcg->tcpmem, PAGE_COUNTER_MAX);
3723 #endif
3724 page_counter_set_min(&memcg->memory, 0);
3725 page_counter_set_low(&memcg->memory, 0);
3726 page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX);
3727 memcg1_soft_limit_reset(memcg);
3728 page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX);
3729 memcg_wb_domain_size_changed(memcg);
3730 }
3731
3732 static void mem_cgroup_css_rstat_flush(struct cgroup_subsys_state *css, int cpu)
3733 {
3734 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3735 struct mem_cgroup *parent = parent_mem_cgroup(memcg);
3736 struct memcg_vmstats_percpu *statc;
3737 long delta, delta_cpu, v;
3738 int i, nid;
3739
3740 statc = per_cpu_ptr(memcg->vmstats_percpu, cpu);
3741
3742 for (i = 0; i < MEMCG_VMSTAT_SIZE; i++) {
3743 /*
3744 * Collect the aggregated propagation counts of groups
3745 * below us. We're in a per-cpu loop here and this is
3746 * a global counter, so the first cycle will get them.
3747 */
3748 delta = memcg->vmstats->state_pending[i];
3749 if (delta)
3750 memcg->vmstats->state_pending[i] = 0;
3751
3752 /* Add CPU changes on this level since the last flush */
3753 delta_cpu = 0;
3754 v = READ_ONCE(statc->state[i]);
3755 if (v != statc->state_prev[i]) {
3756 delta_cpu = v - statc->state_prev[i];
3757 delta += delta_cpu;
3758 statc->state_prev[i] = v;
3759 }
3760
3761 /* Aggregate counts on this level and propagate upwards */
3762 if (delta_cpu)
3763 memcg->vmstats->state_local[i] += delta_cpu;
3764
3765 if (delta) {
3766 memcg->vmstats->state[i] += delta;
3767 if (parent)
3768 parent->vmstats->state_pending[i] += delta;
3769 }
3770 }
3771
3772 for (i = 0; i < NR_MEMCG_EVENTS; i++) {
3773 delta = memcg->vmstats->events_pending[i];
3774 if (delta)
3775 memcg->vmstats->events_pending[i] = 0;
3776
3777 delta_cpu = 0;
3778 v = READ_ONCE(statc->events[i]);
3779 if (v != statc->events_prev[i]) {
3780 delta_cpu = v - statc->events_prev[i];
3781 delta += delta_cpu;
3782 statc->events_prev[i] = v;
3783 }
3784
3785 if (delta_cpu)
3786 memcg->vmstats->events_local[i] += delta_cpu;
3787
3788 if (delta) {
3789 memcg->vmstats->events[i] += delta;
3790 if (parent)
3791 parent->vmstats->events_pending[i] += delta;
3792 }
3793 }
3794
3795 for_each_node_state(nid, N_MEMORY) {
3796 struct mem_cgroup_per_node *pn = memcg->nodeinfo[nid];
3797 struct lruvec_stats *lstats = pn->lruvec_stats;
3798 struct lruvec_stats *plstats = NULL;
3799 struct lruvec_stats_percpu *lstatc;
3800
3801 if (parent)
3802 plstats = parent->nodeinfo[nid]->lruvec_stats;
3803
3804 lstatc = per_cpu_ptr(pn->lruvec_stats_percpu, cpu);
3805
3806 for (i = 0; i < NR_MEMCG_NODE_STAT_ITEMS; i++) {
3807 delta = lstats->state_pending[i];
3808 if (delta)
3809 lstats->state_pending[i] = 0;
3810
3811 delta_cpu = 0;
3812 v = READ_ONCE(lstatc->state[i]);
3813 if (v != lstatc->state_prev[i]) {
3814 delta_cpu = v - lstatc->state_prev[i];
3815 delta += delta_cpu;
3816 lstatc->state_prev[i] = v;
3817 }
3818
3819 if (delta_cpu)
3820 lstats->state_local[i] += delta_cpu;
3821
3822 if (delta) {
3823 lstats->state[i] += delta;
3824 if (plstats)
3825 plstats->state_pending[i] += delta;
3826 }
3827 }
3828 }
3829 WRITE_ONCE(statc->stats_updates, 0);
3830 /* We are in a per-cpu loop here, only do the atomic write once */
3831 if (atomic64_read(&memcg->vmstats->stats_updates))
3832 atomic64_set(&memcg->vmstats->stats_updates, 0);
3833 }
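
/*
 * Worked example for the flush above (illustrative numbers): if this CPU
 * moved a leaf counter from 10 to 14 since the last flush, the leaf sees
 * delta_cpu = 4, folds it into its own state_local[] and state[], and
 * queues the same 4 in its parent's state_pending[]. When the parent is
 * flushed it consumes that pending delta and forwards it one level up, so
 * a change is propagated once per level rather than once per CPU per level.
 */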
3834
3835 static void mem_cgroup_fork(struct task_struct *task)
3836 {
3837 /*
3838 * Set the update flag to cause task->objcg to be initialized lazily
3839 * on the first allocation. It can be done without any synchronization
3840 * because it's always performed on the current task, as is
3841 * current_objcg_update().
3842 */
3843 task->objcg = (struct obj_cgroup *)CURRENT_OBJCG_UPDATE_FLAG;
3844 }
3845
3846 static void mem_cgroup_exit(struct task_struct *task)
3847 {
3848 struct obj_cgroup *objcg = task->objcg;
3849
3850 objcg = (struct obj_cgroup *)
3851 ((unsigned long)objcg & ~CURRENT_OBJCG_UPDATE_FLAG);
3852 obj_cgroup_put(objcg);
3853
3854 /*
3855 * Some kernel allocations can happen after this point,
3856 * but let's ignore them. It can be done without any synchronization
3857 * because it's always performed on the current task, as is
3858 * current_objcg_update().
3859 */
3860 task->objcg = NULL;
3861 }
3862
3863 #ifdef CONFIG_LRU_GEN
3864 static void mem_cgroup_lru_gen_attach(struct cgroup_taskset *tset)
3865 {
3866 struct task_struct *task;
3867 struct cgroup_subsys_state *css;
3868
3869 /* find the first leader if there is any */
3870 cgroup_taskset_for_each_leader(task, css, tset)
3871 break;
3872
3873 if (!task)
3874 return;
3875
3876 task_lock(task);
3877 if (task->mm && READ_ONCE(task->mm->owner) == task)
3878 lru_gen_migrate_mm(task->mm);
3879 task_unlock(task);
3880 }
3881 #else
3882 static void mem_cgroup_lru_gen_attach(struct cgroup_taskset *tset) {}
3883 #endif /* CONFIG_LRU_GEN */
3884
3885 static void mem_cgroup_kmem_attach(struct cgroup_taskset *tset)
3886 {
3887 struct task_struct *task;
3888 struct cgroup_subsys_state *css;
3889
3890 cgroup_taskset_for_each(task, css, tset) {
3891 /* atomically set the update bit */
3892 set_bit(CURRENT_OBJCG_UPDATE_BIT, (unsigned long *)&task->objcg);
3893 }
3894 }
3895
3896 static void mem_cgroup_attach(struct cgroup_taskset *tset)
3897 {
3898 mem_cgroup_lru_gen_attach(tset);
3899 mem_cgroup_kmem_attach(tset);
3900 }
3901
3902 static int seq_puts_memcg_tunable(struct seq_file *m, unsigned long value)
3903 {
3904 if (value == PAGE_COUNTER_MAX)
3905 seq_puts(m, "max\n");
3906 else
3907 seq_printf(m, "%llu\n", (u64)value * PAGE_SIZE);
3908
3909 return 0;
3910 }
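
/*
 * Output format note (illustrative): a tunable left at PAGE_COUNTER_MAX
 * reads back as the literal string "max"; anything else is reported in
 * bytes, e.g. a 262144-page limit on a 4K-page system prints "1073741824".
 */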
3911
3912 static u64 memory_current_read(struct cgroup_subsys_state *css,
3913 struct cftype *cft)
3914 {
3915 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3916
3917 return (u64)page_counter_read(&memcg->memory) * PAGE_SIZE;
3918 }
3919
3920 #define OFP_PEAK_UNSET (((-1UL)))
3921
3922 static int peak_show(struct seq_file *sf, void *v, struct page_counter *pc)
3923 {
3924 struct cgroup_of_peak *ofp = of_peak(sf->private);
3925 u64 fd_peak = READ_ONCE(ofp->value), peak;
3926
3927 /* User wants global or local peak? */
3928 if (fd_peak == OFP_PEAK_UNSET)
3929 peak = pc->watermark;
3930 else
3931 peak = max(fd_peak, READ_ONCE(pc->local_watermark));
3932
3933 seq_printf(sf, "%llu\n", peak * PAGE_SIZE);
3934 return 0;
3935 }
3936
3937 static int memory_peak_show(struct seq_file *sf, void *v)
3938 {
3939 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(sf));
3940
3941 return peak_show(sf, v, &memcg->memory);
3942 }
3943
3944 static int peak_open(struct kernfs_open_file *of)
3945 {
3946 struct cgroup_of_peak *ofp = of_peak(of);
3947
3948 ofp->value = OFP_PEAK_UNSET;
3949 return 0;
3950 }
3951
3952 static void peak_release(struct kernfs_open_file *of)
3953 {
3954 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
3955 struct cgroup_of_peak *ofp = of_peak(of);
3956
3957 if (ofp->value == OFP_PEAK_UNSET) {
3958 /* fast path (no writes on this fd) */
3959 return;
3960 }
3961 spin_lock(&memcg->peaks_lock);
3962 list_del(&ofp->list);
3963 spin_unlock(&memcg->peaks_lock);
3964 }
3965
3966 static ssize_t peak_write(struct kernfs_open_file *of, char *buf, size_t nbytes,
3967 loff_t off, struct page_counter *pc,
3968 struct list_head *watchers)
3969 {
3970 unsigned long usage;
3971 struct cgroup_of_peak *peer_ctx;
3972 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
3973 struct cgroup_of_peak *ofp = of_peak(of);
3974
3975 spin_lock(&memcg->peaks_lock);
3976
3977 usage = page_counter_read(pc);
3978 WRITE_ONCE(pc->local_watermark, usage);
3979
3980 list_for_each_entry(peer_ctx, watchers, list)
3981 if (usage > peer_ctx->value)
3982 WRITE_ONCE(peer_ctx->value, usage);
3983
3984 /* initial write, register watcher */
3985 if (ofp->value == -1)
3986 list_add(&ofp->list, watchers);
3987
3988 WRITE_ONCE(ofp->value, usage);
3989 spin_unlock(&memcg->peaks_lock);
3990
3991 return nbytes;
3992 }
3993
3994 static ssize_t memory_peak_write(struct kernfs_open_file *of, char *buf,
3995 size_t nbytes, loff_t off)
3996 {
3997 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
3998
3999 return peak_write(of, buf, nbytes, off, &memcg->memory,
4000 &memcg->memory_peaks);
4001 }
4002
4003 #undef OFP_PEAK_UNSET
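
/*
 * Userspace usage sketch for the fd-local peak tracking above (illustrative;
 * the cgroup path is hypothetical and error handling is omitted):
 *
 *	int fd = open("/sys/fs/cgroup/test/memory.peak", O_RDWR);
 *	pread(fd, buf, sizeof(buf), 0);	    read the global high watermark
 *	write(fd, "reset", 5);		    any write registers this fd and resets its peak
 *	pread(fd, buf, sizeof(buf), 0);	    max usage seen since the write above
 *
 * peak_write() links every writing fd onto memcg->memory_peaks so later
 * writers can bump its value, and peak_release() unlinks it on close.
 */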
4004
4005 static int memory_min_show(struct seq_file *m, void *v)
4006 {
4007 return seq_puts_memcg_tunable(m,
4008 READ_ONCE(mem_cgroup_from_seq(m)->memory.min));
4009 }
4010
4011 static ssize_t memory_min_write(struct kernfs_open_file *of,
4012 char *buf, size_t nbytes, loff_t off)
4013 {
4014 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4015 unsigned long min;
4016 int err;
4017
4018 buf = strstrip(buf);
4019 err = page_counter_memparse(buf, "max", &min);
4020 if (err)
4021 return err;
4022
4023 page_counter_set_min(&memcg->memory, min);
4024
4025 return nbytes;
4026 }
4027
4028 static int memory_low_show(struct seq_file *m, void *v)
4029 {
4030 return seq_puts_memcg_tunable(m,
4031 READ_ONCE(mem_cgroup_from_seq(m)->memory.low));
4032 }
4033
4034 static ssize_t memory_low_write(struct kernfs_open_file *of,
4035 char *buf, size_t nbytes, loff_t off)
4036 {
4037 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4038 unsigned long low;
4039 int err;
4040
4041 buf = strstrip(buf);
4042 err = page_counter_memparse(buf, "max", &low);
4043 if (err)
4044 return err;
4045
4046 page_counter_set_low(&memcg->memory, low);
4047
4048 return nbytes;
4049 }
4050
4051 static int memory_high_show(struct seq_file *m, void *v)
4052 {
4053 return seq_puts_memcg_tunable(m,
4054 READ_ONCE(mem_cgroup_from_seq(m)->memory.high));
4055 }
4056
4057 static ssize_t memory_high_write(struct kernfs_open_file *of,
4058 char *buf, size_t nbytes, loff_t off)
4059 {
4060 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4061 unsigned int nr_retries = MAX_RECLAIM_RETRIES;
4062 bool drained = false;
4063 unsigned long high;
4064 int err;
4065
4066 buf = strstrip(buf);
4067 err = page_counter_memparse(buf, "max", &high);
4068 if (err)
4069 return err;
4070
4071 page_counter_set_high(&memcg->memory, high);
4072
4073 for (;;) {
4074 unsigned long nr_pages = page_counter_read(&memcg->memory);
4075 unsigned long reclaimed;
4076
4077 if (nr_pages <= high)
4078 break;
4079
4080 if (signal_pending(current))
4081 break;
4082
4083 if (!drained) {
4084 drain_all_stock(memcg);
4085 drained = true;
4086 continue;
4087 }
4088
4089 reclaimed = try_to_free_mem_cgroup_pages(memcg, nr_pages - high,
4090 GFP_KERNEL, MEMCG_RECLAIM_MAY_SWAP, NULL);
4091
4092 if (!reclaimed && !nr_retries--)
4093 break;
4094 }
4095
4096 memcg_wb_domain_size_changed(memcg);
4097 return nbytes;
4098 }
4099
4100 static int memory_max_show(struct seq_file *m, void *v)
4101 {
4102 return seq_puts_memcg_tunable(m,
4103 READ_ONCE(mem_cgroup_from_seq(m)->memory.max));
4104 }
4105
4106 static ssize_t memory_max_write(struct kernfs_open_file *of,
4107 char *buf, size_t nbytes, loff_t off)
4108 {
4109 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4110 unsigned int nr_reclaims = MAX_RECLAIM_RETRIES;
4111 bool drained = false;
4112 unsigned long max;
4113 int err;
4114
4115 buf = strstrip(buf);
4116 err = page_counter_memparse(buf, "max", &max);
4117 if (err)
4118 return err;
4119
4120 xchg(&memcg->memory.max, max);
4121
4122 for (;;) {
4123 unsigned long nr_pages = page_counter_read(&memcg->memory);
4124
4125 if (nr_pages <= max)
4126 break;
4127
4128 if (signal_pending(current))
4129 break;
4130
4131 if (!drained) {
4132 drain_all_stock(memcg);
4133 drained = true;
4134 continue;
4135 }
4136
4137 if (nr_reclaims) {
4138 if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max,
4139 GFP_KERNEL, MEMCG_RECLAIM_MAY_SWAP, NULL))
4140 nr_reclaims--;
4141 continue;
4142 }
4143
4144 memcg_memory_event(memcg, MEMCG_OOM);
4145 if (!mem_cgroup_out_of_memory(memcg, GFP_KERNEL, 0))
4146 break;
4147 }
4148
4149 memcg_wb_domain_size_changed(memcg);
4150 return nbytes;
4151 }
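
/*
 * Behavioural sketch for the two write handlers above (illustrative):
 * lowering memory.high only triggers best-effort reclaim toward the new
 * value, while lowering memory.max keeps reclaiming and, once
 * MAX_RECLAIM_RETRIES attempts have failed, raises MEMCG_OOM and invokes
 * mem_cgroup_out_of_memory(). E.g. writing "512M" to memory.max of a group
 * using 1G first drains per-cpu stocks, then retries direct reclaim, and
 * only then falls back to OOM killing inside the group.
 */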
4152
4153 /*
4154 * Note: don't forget to update the 'samples/cgroup/memcg_event_listener'
4155 * if any new events become available.
4156 */
4157 static void __memory_events_show(struct seq_file *m, atomic_long_t *events)
4158 {
4159 seq_printf(m, "low %lu\n", atomic_long_read(&events[MEMCG_LOW]));
4160 seq_printf(m, "high %lu\n", atomic_long_read(&events[MEMCG_HIGH]));
4161 seq_printf(m, "max %lu\n", atomic_long_read(&events[MEMCG_MAX]));
4162 seq_printf(m, "oom %lu\n", atomic_long_read(&events[MEMCG_OOM]));
4163 seq_printf(m, "oom_kill %lu\n",
4164 atomic_long_read(&events[MEMCG_OOM_KILL]));
4165 seq_printf(m, "oom_group_kill %lu\n",
4166 atomic_long_read(&events[MEMCG_OOM_GROUP_KILL]));
4167 }
4168
4169 static int memory_events_show(struct seq_file *m, void *v)
4170 {
4171 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
4172
4173 __memory_events_show(m, memcg->memory_events);
4174 return 0;
4175 }
4176
4177 static int memory_events_local_show(struct seq_file *m, void *v)
4178 {
4179 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
4180
4181 __memory_events_show(m, memcg->memory_events_local);
4182 return 0;
4183 }
4184
4185 int memory_stat_show(struct seq_file *m, void *v)
4186 {
4187 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
4188 char *buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
4189 struct seq_buf s;
4190
4191 if (!buf)
4192 return -ENOMEM;
4193 seq_buf_init(&s, buf, PAGE_SIZE);
4194 memory_stat_format(memcg, &s);
4195 seq_puts(m, buf);
4196 kfree(buf);
4197 return 0;
4198 }
4199
4200 #ifdef CONFIG_NUMA
4201 static inline unsigned long lruvec_page_state_output(struct lruvec *lruvec,
4202 int item)
4203 {
4204 return lruvec_page_state(lruvec, item) *
4205 memcg_page_state_output_unit(item);
4206 }
4207
4208 static int memory_numa_stat_show(struct seq_file *m, void *v)
4209 {
4210 int i;
4211 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
4212
4213 mem_cgroup_flush_stats(memcg);
4214
4215 for (i = 0; i < ARRAY_SIZE(memory_stats); i++) {
4216 int nid;
4217
4218 if (memory_stats[i].idx >= NR_VM_NODE_STAT_ITEMS)
4219 continue;
4220
4221 seq_printf(m, "%s", memory_stats[i].name);
4222 for_each_node_state(nid, N_MEMORY) {
4223 u64 size;
4224 struct lruvec *lruvec;
4225
4226 lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
4227 size = lruvec_page_state_output(lruvec,
4228 memory_stats[i].idx);
4229 seq_printf(m, " N%d=%llu", nid, size);
4230 }
4231 seq_putc(m, '\n');
4232 }
4233
4234 return 0;
4235 }
4236 #endif
4237
4238 static int memory_oom_group_show(struct seq_file *m, void *v)
4239 {
4240 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
4241
4242 seq_printf(m, "%d\n", READ_ONCE(memcg->oom_group));
4243
4244 return 0;
4245 }
4246
4247 static ssize_t memory_oom_group_write(struct kernfs_open_file *of,
4248 char *buf, size_t nbytes, loff_t off)
4249 {
4250 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4251 int ret, oom_group;
4252
4253 buf = strstrip(buf);
4254 if (!buf)
4255 return -EINVAL;
4256
4257 ret = kstrtoint(buf, 0, &oom_group);
4258 if (ret)
4259 return ret;
4260
4261 if (oom_group != 0 && oom_group != 1)
4262 return -EINVAL;
4263
4264 WRITE_ONCE(memcg->oom_group, oom_group);
4265
4266 return nbytes;
4267 }
4268
4269 enum {
4270 MEMORY_RECLAIM_SWAPPINESS = 0,
4271 MEMORY_RECLAIM_NULL,
4272 };
4273
4274 static const match_table_t tokens = {
4275 { MEMORY_RECLAIM_SWAPPINESS, "swappiness=%d"},
4276 { MEMORY_RECLAIM_NULL, NULL },
4277 };
4278
4279 static ssize_t memory_reclaim(struct kernfs_open_file *of, char *buf,
4280 size_t nbytes, loff_t off)
4281 {
4282 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4283 unsigned int nr_retries = MAX_RECLAIM_RETRIES;
4284 unsigned long nr_to_reclaim, nr_reclaimed = 0;
4285 int swappiness = -1;
4286 unsigned int reclaim_options;
4287 char *old_buf, *start;
4288 substring_t args[MAX_OPT_ARGS];
4289
4290 buf = strstrip(buf);
4291
4292 old_buf = buf;
4293 nr_to_reclaim = memparse(buf, &buf) / PAGE_SIZE;
4294 if (buf == old_buf)
4295 return -EINVAL;
4296
4297 buf = strstrip(buf);
4298
4299 while ((start = strsep(&buf, " ")) != NULL) {
4300 if (!strlen(start))
4301 continue;
4302 switch (match_token(start, tokens, args)) {
4303 case MEMORY_RECLAIM_SWAPPINESS:
4304 if (match_int(&args[0], &swappiness))
4305 return -EINVAL;
4306 if (swappiness < MIN_SWAPPINESS || swappiness > MAX_SWAPPINESS)
4307 return -EINVAL;
4308 break;
4309 default:
4310 return -EINVAL;
4311 }
4312 }
4313
4314 reclaim_options = MEMCG_RECLAIM_MAY_SWAP | MEMCG_RECLAIM_PROACTIVE;
4315 while (nr_reclaimed < nr_to_reclaim) {
4316 /* Will converge on zero, but reclaim enforces a minimum */
4317 unsigned long batch_size = (nr_to_reclaim - nr_reclaimed) / 4;
4318 unsigned long reclaimed;
4319
4320 if (signal_pending(current))
4321 return -EINTR;
4322
4323 /*
4324 * This is the final attempt, drain percpu lru caches in the
4325 * hope of introducing more evictable pages for
4326 * try_to_free_mem_cgroup_pages().
4327 */
4328 if (!nr_retries)
4329 lru_add_drain_all();
4330
4331 reclaimed = try_to_free_mem_cgroup_pages(memcg,
4332 batch_size, GFP_KERNEL,
4333 reclaim_options,
4334 swappiness == -1 ? NULL : &swappiness);
4335
4336 if (!reclaimed && !nr_retries--)
4337 return -EAGAIN;
4338
4339 nr_reclaimed += reclaimed;
4340 }
4341
4342 return nbytes;
4343 }
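
/*
 * Write format accepted above (illustrative): a size parsed by memparse()
 * optionally followed by options from the 'tokens' table, e.g. writing
 * "1G swappiness=0" to memory.reclaim requests 1GiB of proactive reclaim
 * while strongly preferring file-backed pages for this call only; omitting
 * swappiness= keeps the cgroup's configured swappiness.
 */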
4344
4345 static struct cftype memory_files[] = {
4346 {
4347 .name = "current",
4348 .flags = CFTYPE_NOT_ON_ROOT,
4349 .read_u64 = memory_current_read,
4350 },
4351 {
4352 .name = "peak",
4353 .flags = CFTYPE_NOT_ON_ROOT,
4354 .open = peak_open,
4355 .release = peak_release,
4356 .seq_show = memory_peak_show,
4357 .write = memory_peak_write,
4358 },
4359 {
4360 .name = "min",
4361 .flags = CFTYPE_NOT_ON_ROOT,
4362 .seq_show = memory_min_show,
4363 .write = memory_min_write,
4364 },
4365 {
4366 .name = "low",
4367 .flags = CFTYPE_NOT_ON_ROOT,
4368 .seq_show = memory_low_show,
4369 .write = memory_low_write,
4370 },
4371 {
4372 .name = "high",
4373 .flags = CFTYPE_NOT_ON_ROOT,
4374 .seq_show = memory_high_show,
4375 .write = memory_high_write,
4376 },
4377 {
4378 .name = "max",
4379 .flags = CFTYPE_NOT_ON_ROOT,
4380 .seq_show = memory_max_show,
4381 .write = memory_max_write,
4382 },
4383 {
4384 .name = "events",
4385 .flags = CFTYPE_NOT_ON_ROOT,
4386 .file_offset = offsetof(struct mem_cgroup, events_file),
4387 .seq_show = memory_events_show,
4388 },
4389 {
4390 .name = "events.local",
4391 .flags = CFTYPE_NOT_ON_ROOT,
4392 .file_offset = offsetof(struct mem_cgroup, events_local_file),
4393 .seq_show = memory_events_local_show,
4394 },
4395 {
4396 .name = "stat",
4397 .seq_show = memory_stat_show,
4398 },
4399 #ifdef CONFIG_NUMA
4400 {
4401 .name = "numa_stat",
4402 .seq_show = memory_numa_stat_show,
4403 },
4404 #endif
4405 {
4406 .name = "oom.group",
4407 .flags = CFTYPE_NOT_ON_ROOT | CFTYPE_NS_DELEGATABLE,
4408 .seq_show = memory_oom_group_show,
4409 .write = memory_oom_group_write,
4410 },
4411 {
4412 .name = "reclaim",
4413 .flags = CFTYPE_NS_DELEGATABLE,
4414 .write = memory_reclaim,
4415 },
4416 { } /* terminate */
4417 };
4418
4419 struct cgroup_subsys memory_cgrp_subsys = {
4420 .css_alloc = mem_cgroup_css_alloc,
4421 .css_online = mem_cgroup_css_online,
4422 .css_offline = mem_cgroup_css_offline,
4423 .css_released = mem_cgroup_css_released,
4424 .css_free = mem_cgroup_css_free,
4425 .css_reset = mem_cgroup_css_reset,
4426 .css_rstat_flush = mem_cgroup_css_rstat_flush,
4427 .attach = mem_cgroup_attach,
4428 .fork = mem_cgroup_fork,
4429 .exit = mem_cgroup_exit,
4430 .dfl_cftypes = memory_files,
4431 #ifdef CONFIG_MEMCG_V1
4432 .can_attach = memcg1_can_attach,
4433 .cancel_attach = memcg1_cancel_attach,
4434 .post_attach = memcg1_move_task,
4435 .legacy_cftypes = mem_cgroup_legacy_files,
4436 #endif
4437 .early_init = 0,
4438 };
4439
4440 /**
4441 * mem_cgroup_calculate_protection - check if memory consumption is in the normal range
4442 * @root: the top ancestor of the sub-tree being checked
4443 * @memcg: the memory cgroup to check
4444 *
4445 * WARNING: This function is not stateless! It can only be used as part
4446 * of a top-down tree iteration, not for isolated queries.
4447 */
4448 void mem_cgroup_calculate_protection(struct mem_cgroup *root,
4449 struct mem_cgroup *memcg)
4450 {
4451 bool recursive_protection =
4452 cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_RECURSIVE_PROT;
4453
4454 if (mem_cgroup_disabled())
4455 return;
4456
4457 if (!root)
4458 root = root_mem_cgroup;
4459
4460 page_counter_calculate_protection(&root->memory, &memcg->memory, recursive_protection);
4461 }
4462
4463 static int charge_memcg(struct folio *folio, struct mem_cgroup *memcg,
4464 gfp_t gfp)
4465 {
4466 int ret;
4467
4468 ret = try_charge(memcg, gfp, folio_nr_pages(folio));
4469 if (ret)
4470 goto out;
4471
4472 mem_cgroup_commit_charge(folio, memcg);
4473 out:
4474 return ret;
4475 }
4476
4477 int __mem_cgroup_charge(struct folio *folio, struct mm_struct *mm, gfp_t gfp)
4478 {
4479 struct mem_cgroup *memcg;
4480 int ret;
4481
4482 memcg = get_mem_cgroup_from_mm(mm);
4483 ret = charge_memcg(folio, memcg, gfp);
4484 css_put(&memcg->css);
4485
4486 return ret;
4487 }
4488
4489 /**
4490 * mem_cgroup_hugetlb_try_charge - try to charge the memcg for a hugetlb folio
4491 * @memcg: memcg to charge.
4492 * @gfp: reclaim mode.
4493 * @nr_pages: number of pages to charge.
4494 *
4495 * This function is called when allocating a huge page folio to determine if
4496 * the memcg has the capacity for it. It does not commit the charge yet,
4497 * as the hugetlb folio itself has not been obtained from the hugetlb pool.
4498 *
4499 * Once we have obtained the hugetlb folio, we can call
4500 * mem_cgroup_commit_charge() to commit the charge. If we fail to obtain the
4501 * folio, we should instead call mem_cgroup_cancel_charge() to undo the effect
4502 * of try_charge().
4503 *
4504 * Returns 0 on success. Otherwise, an error code is returned.
4505 */
4506 int mem_cgroup_hugetlb_try_charge(struct mem_cgroup *memcg, gfp_t gfp,
4507 long nr_pages)
4508 {
4509 /*
4510 * If hugetlb memcg charging is not enabled, do not fail hugetlb allocation,
4511 * but do not attempt to commit charge later (or cancel on error) either.
4512 */
4513 if (mem_cgroup_disabled() || !memcg ||
4514 !cgroup_subsys_on_dfl(memory_cgrp_subsys) ||
4515 !(cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_HUGETLB_ACCOUNTING))
4516 return -EOPNOTSUPP;
4517
4518 if (try_charge(memcg, gfp, nr_pages))
4519 return -ENOMEM;
4520
4521 return 0;
4522 }
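
/*
 * Call-sequence sketch for the protocol described above (illustrative;
 * the hugetlb-pool allocation step is a placeholder):
 *
 *	ret = mem_cgroup_hugetlb_try_charge(memcg, gfp, nr_pages);
 *	if (ret == -ENOMEM)
 *		return ret;		   over the limit, fail the allocation
 *	charged = !ret;			   -EOPNOTSUPP means accounting is off
 *	folio = <allocate from the hugetlb pool>;
 *	if (!folio) {
 *		if (charged)
 *			mem_cgroup_cancel_charge(memcg, nr_pages);
 *		return -ENOSPC;
 *	}
 *	if (charged)
 *		mem_cgroup_commit_charge(folio, memcg);
 */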
4523
4524 /**
4525 * mem_cgroup_swapin_charge_folio - Charge a newly allocated folio for swapin.
4526 * @folio: folio to charge.
4527 * @mm: mm context of the victim
4528 * @gfp: reclaim mode
4529 * @entry: swap entry for which the folio is allocated
4530 *
4531 * This function charges a folio allocated for swapin. Please call this before
4532 * adding the folio to the swapcache.
4533 *
4534 * Returns 0 on success. Otherwise, an error code is returned.
4535 */
4536 int mem_cgroup_swapin_charge_folio(struct folio *folio, struct mm_struct *mm,
4537 gfp_t gfp, swp_entry_t entry)
4538 {
4539 struct mem_cgroup *memcg;
4540 unsigned short id;
4541 int ret;
4542
4543 if (mem_cgroup_disabled())
4544 return 0;
4545
4546 id = lookup_swap_cgroup_id(entry);
4547 rcu_read_lock();
4548 memcg = mem_cgroup_from_id(id);
4549 if (!memcg || !css_tryget_online(&memcg->css))
4550 memcg = get_mem_cgroup_from_mm(mm);
4551 rcu_read_unlock();
4552
4553 ret = charge_memcg(folio, memcg, gfp);
4554
4555 css_put(&memcg->css);
4556 return ret;
4557 }
4558
4559 /*
4560 * mem_cgroup_swapin_uncharge_swap - uncharge swap slot
4561 * @entry: the first swap entry for which the pages are charged
4562 * @nr_pages: number of pages which will be uncharged
4563 *
4564 * Call this function after successfully adding the charged page to swapcache.
4565 *
4566 * Note: This function assumes the page for which the swap slot is being
4567 * uncharged is an order-0 page.
4568 */
4569 void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry, unsigned int nr_pages)
4570 {
4571 /*
4572 * Cgroup1's unified memory+swap counter has been charged with the
4573 * new swapcache page, finish the transfer by uncharging the swap
4574 * slot. The swap slot would also get uncharged when it dies, but
4575 * it can stick around indefinitely and we'd count the page twice
4576 * the entire time.
4577 *
4578 * Cgroup2 has separate resource counters for memory and swap,
4579 * so this is a non-issue here. Memory and swap charge lifetimes
4580 * correspond 1:1 to page and swap slot lifetimes: we charge the
4581 * page to memory here, and uncharge swap when the slot is freed.
4582 */
4583 if (!mem_cgroup_disabled() && do_memsw_account()) {
4584 /*
4585 * The swap entry might not get freed for a long time,
4586 * let's not wait for it. The page already received a
4587 * memory+swap charge, drop the swap entry duplicate.
4588 */
4589 mem_cgroup_uncharge_swap(entry, nr_pages);
4590 }
4591 }
4592
4593 struct uncharge_gather {
4594 struct mem_cgroup *memcg;
4595 unsigned long nr_memory;
4596 unsigned long pgpgout;
4597 unsigned long nr_kmem;
4598 int nid;
4599 };
4600
4601 static inline void uncharge_gather_clear(struct uncharge_gather *ug)
4602 {
4603 memset(ug, 0, sizeof(*ug));
4604 }
4605
4606 static void uncharge_batch(const struct uncharge_gather *ug)
4607 {
4608 if (ug->nr_memory) {
4609 page_counter_uncharge(&ug->memcg->memory, ug->nr_memory);
4610 if (do_memsw_account())
4611 page_counter_uncharge(&ug->memcg->memsw, ug->nr_memory);
4612 if (ug->nr_kmem) {
4613 mod_memcg_state(ug->memcg, MEMCG_KMEM, -ug->nr_kmem);
4614 memcg1_account_kmem(ug->memcg, -ug->nr_kmem);
4615 }
4616 memcg1_oom_recover(ug->memcg);
4617 }
4618
4619 memcg1_uncharge_batch(ug->memcg, ug->pgpgout, ug->nr_memory, ug->nid);
4620
4621 /* drop reference from uncharge_folio */
4622 css_put(&ug->memcg->css);
4623 }
4624
4625 static void uncharge_folio(struct folio *folio, struct uncharge_gather *ug)
4626 {
4627 long nr_pages;
4628 struct mem_cgroup *memcg;
4629 struct obj_cgroup *objcg;
4630
4631 VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
4632 VM_BUG_ON_FOLIO(folio_order(folio) > 1 &&
4633 !folio_test_hugetlb(folio) &&
4634 !list_empty(&folio->_deferred_list) &&
4635 folio_test_partially_mapped(folio), folio);
4636
4637 /*
4638 * Nobody should be changing or seriously looking at
4639 * folio memcg or objcg at this point; we have fully
4640 * exclusive access to the folio.
4641 */
4642 if (folio_memcg_kmem(folio)) {
4643 objcg = __folio_objcg(folio);
4644 /*
4645 * This get matches the put at the end of the function and
4646 * kmem pages do not hold memcg references anymore.
4647 */
4648 memcg = get_mem_cgroup_from_objcg(objcg);
4649 } else {
4650 memcg = __folio_memcg(folio);
4651 }
4652
4653 if (!memcg)
4654 return;
4655
4656 if (ug->memcg != memcg) {
4657 if (ug->memcg) {
4658 uncharge_batch(ug);
4659 uncharge_gather_clear(ug);
4660 }
4661 ug->memcg = memcg;
4662 ug->nid = folio_nid(folio);
4663
4664 /* pairs with css_put in uncharge_batch */
4665 css_get(&memcg->css);
4666 }
4667
4668 nr_pages = folio_nr_pages(folio);
4669
4670 if (folio_memcg_kmem(folio)) {
4671 ug->nr_memory += nr_pages;
4672 ug->nr_kmem += nr_pages;
4673
4674 folio->memcg_data = 0;
4675 obj_cgroup_put(objcg);
4676 } else {
4677 /* LRU pages aren't accounted at the root level */
4678 if (!mem_cgroup_is_root(memcg))
4679 ug->nr_memory += nr_pages;
4680 ug->pgpgout++;
4681
4682 folio->memcg_data = 0;
4683 }
4684
4685 css_put(&memcg->css);
4686 }
4687
4688 void __mem_cgroup_uncharge(struct folio *folio)
4689 {
4690 struct uncharge_gather ug;
4691
4692 /* Don't touch folio->lru of any random page, pre-check: */
4693 if (!folio_memcg_charged(folio))
4694 return;
4695
4696 uncharge_gather_clear(&ug);
4697 uncharge_folio(folio, &ug);
4698 uncharge_batch(&ug);
4699 }
4700
4701 void __mem_cgroup_uncharge_folios(struct folio_batch *folios)
4702 {
4703 struct uncharge_gather ug;
4704 unsigned int i;
4705
4706 uncharge_gather_clear(&ug);
4707 for (i = 0; i < folios->nr; i++)
4708 uncharge_folio(folios->folios[i], &ug);
4709 if (ug.memcg)
4710 uncharge_batch(&ug);
4711 }
4712
4713 /**
4714 * mem_cgroup_replace_folio - Charge a folio's replacement.
4715 * @old: Currently circulating folio.
4716 * @new: Replacement folio.
4717 *
4718 * Charge @new as a replacement folio for @old. @old will
4719 * be uncharged upon free.
4720 *
4721 * Both folios must be locked, @new->mapping must be set up.
4722 */
4723 void mem_cgroup_replace_folio(struct folio *old, struct folio *new)
4724 {
4725 struct mem_cgroup *memcg;
4726 long nr_pages = folio_nr_pages(new);
4727
4728 VM_BUG_ON_FOLIO(!folio_test_locked(old), old);
4729 VM_BUG_ON_FOLIO(!folio_test_locked(new), new);
4730 VM_BUG_ON_FOLIO(folio_test_anon(old) != folio_test_anon(new), new);
4731 VM_BUG_ON_FOLIO(folio_nr_pages(old) != nr_pages, new);
4732
4733 if (mem_cgroup_disabled())
4734 return;
4735
4736 /* Page cache replacement: new folio already charged? */
4737 if (folio_memcg_charged(new))
4738 return;
4739
4740 memcg = folio_memcg(old);
4741 VM_WARN_ON_ONCE_FOLIO(!memcg, old);
4742 if (!memcg)
4743 return;
4744
4745 /* Force-charge the new page. The old one will be freed soon */
4746 if (!mem_cgroup_is_root(memcg)) {
4747 page_counter_charge(&memcg->memory, nr_pages);
4748 if (do_memsw_account())
4749 page_counter_charge(&memcg->memsw, nr_pages);
4750 }
4751
4752 css_get(&memcg->css);
4753 commit_charge(new, memcg);
4754 memcg1_commit_charge(new, memcg);
4755 }
4756
4757 /**
4758 * mem_cgroup_migrate - Transfer the memcg data from the old to the new folio.
4759 * @old: Currently circulating folio.
4760 * @new: Replacement folio.
4761 *
4762 * Transfer the memcg data from the old folio to the new folio for migration.
4763 * The old folio's data info will be cleared. Note that the memory counters
4764 * will remain unchanged throughout the process.
4765 *
4766 * Both folios must be locked, @new->mapping must be set up.
4767 */
4768 void mem_cgroup_migrate(struct folio *old, struct folio *new)
4769 {
4770 struct mem_cgroup *memcg;
4771
4772 VM_BUG_ON_FOLIO(!folio_test_locked(old), old);
4773 VM_BUG_ON_FOLIO(!folio_test_locked(new), new);
4774 VM_BUG_ON_FOLIO(folio_test_anon(old) != folio_test_anon(new), new);
4775 VM_BUG_ON_FOLIO(folio_nr_pages(old) != folio_nr_pages(new), new);
4776 VM_BUG_ON_FOLIO(folio_test_lru(old), old);
4777
4778 if (mem_cgroup_disabled())
4779 return;
4780
4781 memcg = folio_memcg(old);
4782 /*
4783 * Note that it is normal to see !memcg for a hugetlb folio.
4784 * E.g. it could have been allocated when memory_hugetlb_accounting
4785 * was not selected.
4786 */
4787 VM_WARN_ON_ONCE_FOLIO(!folio_test_hugetlb(old) && !memcg, old);
4788 if (!memcg)
4789 return;
4790
4791 /* Transfer the charge and the css ref */
4792 commit_charge(new, memcg);
4793 old->memcg_data = 0;
4794 }
4795
4796 DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
4797 EXPORT_SYMBOL(memcg_sockets_enabled_key);
4798
4799 void mem_cgroup_sk_alloc(struct sock *sk)
4800 {
4801 struct mem_cgroup *memcg;
4802
4803 if (!mem_cgroup_sockets_enabled)
4804 return;
4805
4806 /* Do not associate the sock with an unrelated interrupted task's memcg. */
4807 if (!in_task())
4808 return;
4809
4810 rcu_read_lock();
4811 memcg = mem_cgroup_from_task(current);
4812 if (mem_cgroup_is_root(memcg))
4813 goto out;
4814 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg1_tcpmem_active(memcg))
4815 goto out;
4816 if (css_tryget(&memcg->css))
4817 sk->sk_memcg = memcg;
4818 out:
4819 rcu_read_unlock();
4820 }
4821
4822 void mem_cgroup_sk_free(struct sock *sk)
4823 {
4824 if (sk->sk_memcg)
4825 css_put(&sk->sk_memcg->css);
4826 }
4827
4828 /**
4829 * mem_cgroup_charge_skmem - charge socket memory
4830 * @memcg: memcg to charge
4831 * @nr_pages: number of pages to charge
4832 * @gfp_mask: reclaim mode
4833 *
4834 * Charges @nr_pages to @memcg. Returns %true if the charge fit within
4835 * @memcg's configured limit, %false if it doesn't.
4836 */
4837 bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages,
4838 gfp_t gfp_mask)
4839 {
4840 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
4841 return memcg1_charge_skmem(memcg, nr_pages, gfp_mask);
4842
4843 if (try_charge(memcg, gfp_mask, nr_pages) == 0) {
4844 mod_memcg_state(memcg, MEMCG_SOCK, nr_pages);
4845 return true;
4846 }
4847
4848 return false;
4849 }
4850
4851 /**
4852 * mem_cgroup_uncharge_skmem - uncharge socket memory
4853 * @memcg: memcg to uncharge
4854 * @nr_pages: number of pages to uncharge
4855 */
4856 void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
4857 {
4858 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
4859 memcg1_uncharge_skmem(memcg, nr_pages);
4860 return;
4861 }
4862
4863 mod_memcg_state(memcg, MEMCG_SOCK, -nr_pages);
4864
4865 refill_stock(memcg, nr_pages);
4866 }
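
/*
 * Pairing note (illustrative): networking code charges buffer pages with
 * mem_cgroup_charge_skmem() as a memcg-owned socket grows and must release
 * exactly the same page count through mem_cgroup_uncharge_skmem() as it
 * shrinks. On cgroup2 the pages are tracked in the MEMCG_SOCK counter and
 * returned to the per-cpu stock on uncharge; on cgroup1 both paths fall
 * back to the legacy tcpmem accounting via memcg1_charge_skmem() and
 * memcg1_uncharge_skmem().
 */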
4867
4868 static int __init cgroup_memory(char *s)
4869 {
4870 char *token;
4871
4872 while ((token = strsep(&s, ",")) != NULL) {
4873 if (!*token)
4874 continue;
4875 if (!strcmp(token, "nosocket"))
4876 cgroup_memory_nosocket = true;
4877 if (!strcmp(token, "nokmem"))
4878 cgroup_memory_nokmem = true;
4879 if (!strcmp(token, "nobpf"))
4880 cgroup_memory_nobpf = true;
4881 }
4882 return 1;
4883 }
4884 __setup("cgroup.memory=", cgroup_memory);
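
/*
 * Boot-time example (illustrative): "cgroup.memory=nosocket,nokmem" on the
 * kernel command line sets both of the corresponding flags above, disabling
 * socket and kernel memory accounting while leaving BPF accounting enabled.
 */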
4885
4886 /*
4887 * subsys_initcall() for memory controller.
4888 *
4889 * Some parts like memcg_hotplug_cpu_dead() have to be initialized from this
4890 * context because of lock dependencies (cgroup_lock -> cpu hotplug) but
4891 * basically everything that doesn't depend on a specific mem_cgroup structure
4892 * should be initialized from here.
4893 */
4894 static int __init mem_cgroup_init(void)
4895 {
4896 int cpu;
4897
4898 /*
4899 * Currently s32 type (can refer to struct batched_lruvec_stat) is
4900 * used for per-memcg-per-cpu caching of per-node statistics. In order
4901 * to work fine, we should make sure that the overfill threshold can't
4902 * exceed S32_MAX / PAGE_SIZE.
4903 */
4904 BUILD_BUG_ON(MEMCG_CHARGE_BATCH > S32_MAX / PAGE_SIZE);
4905
4906 cpuhp_setup_state_nocalls(CPUHP_MM_MEMCQ_DEAD, "mm/memctrl:dead", NULL,
4907 memcg_hotplug_cpu_dead);
4908
4909 for_each_possible_cpu(cpu)
4910 INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work,
4911 drain_local_stock);
4912
4913 return 0;
4914 }
4915 subsys_initcall(mem_cgroup_init);
4916
4917 #ifdef CONFIG_SWAP
4918 static struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg)
4919 {
4920 while (!refcount_inc_not_zero(&memcg->id.ref)) {
4921 /*
4922 * The root cgroup cannot be destroyed, so its refcount must
4923 * always be >= 1.
4924 */
4925 if (WARN_ON_ONCE(mem_cgroup_is_root(memcg))) {
4926 VM_BUG_ON(1);
4927 break;
4928 }
4929 memcg = parent_mem_cgroup(memcg);
4930 if (!memcg)
4931 memcg = root_mem_cgroup;
4932 }
4933 return memcg;
4934 }
4935
4936 /**
4937 * mem_cgroup_swapout - transfer a memsw charge to swap
4938 * @folio: folio whose memsw charge to transfer
4939 * @entry: swap entry to move the charge to
4940 *
4941 * Transfer the memsw charge of @folio to @entry.
4942 */
4943 void mem_cgroup_swapout(struct folio *folio, swp_entry_t entry)
4944 {
4945 struct mem_cgroup *memcg, *swap_memcg;
4946 unsigned int nr_entries;
4947 unsigned short oldid;
4948
4949 VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
4950 VM_BUG_ON_FOLIO(folio_ref_count(folio), folio);
4951
4952 if (mem_cgroup_disabled())
4953 return;
4954
4955 if (!do_memsw_account())
4956 return;
4957
4958 memcg = folio_memcg(folio);
4959
4960 VM_WARN_ON_ONCE_FOLIO(!memcg, folio);
4961 if (!memcg)
4962 return;
4963
4964 /*
4965 * In case the memcg owning these pages has been offlined and doesn't
4966 * have an ID allocated to it anymore, charge the closest online
4967 * ancestor for the swap instead and transfer the memory+swap charge.
4968 */
4969 swap_memcg = mem_cgroup_id_get_online(memcg);
4970 nr_entries = folio_nr_pages(folio);
4971 /* Get references for the tail pages, too */
4972 if (nr_entries > 1)
4973 mem_cgroup_id_get_many(swap_memcg, nr_entries - 1);
4974 oldid = swap_cgroup_record(entry, mem_cgroup_id(swap_memcg),
4975 nr_entries);
4976 VM_BUG_ON_FOLIO(oldid, folio);
4977 mod_memcg_state(swap_memcg, MEMCG_SWAP, nr_entries);
4978
4979 folio->memcg_data = 0;
4980
4981 if (!mem_cgroup_is_root(memcg))
4982 page_counter_uncharge(&memcg->memory, nr_entries);
4983
4984 if (memcg != swap_memcg) {
4985 if (!mem_cgroup_is_root(swap_memcg))
4986 page_counter_charge(&swap_memcg->memsw, nr_entries);
4987 page_counter_uncharge(&memcg->memsw, nr_entries);
4988 }
4989
4990 memcg1_swapout(folio, memcg);
4991 css_put(&memcg->css);
4992 }
4993
4994 /**
4995 * __mem_cgroup_try_charge_swap - try charging swap space for a folio
4996 * @folio: folio being added to swap
4997 * @entry: swap entry to charge
4998 *
4999 * Try to charge @folio's memcg for the swap space at @entry.
5000 *
5001 * Returns 0 on success, -ENOMEM on failure.
5002 */
5003 int __mem_cgroup_try_charge_swap(struct folio *folio, swp_entry_t entry)
5004 {
5005 unsigned int nr_pages = folio_nr_pages(folio);
5006 struct page_counter *counter;
5007 struct mem_cgroup *memcg;
5008 unsigned short oldid;
5009
5010 if (do_memsw_account())
5011 return 0;
5012
5013 memcg = folio_memcg(folio);
5014
5015 VM_WARN_ON_ONCE_FOLIO(!memcg, folio);
5016 if (!memcg)
5017 return 0;
5018
5019 if (!entry.val) {
5020 memcg_memory_event(memcg, MEMCG_SWAP_FAIL);
5021 return 0;
5022 }
5023
5024 memcg = mem_cgroup_id_get_online(memcg);
5025
5026 if (!mem_cgroup_is_root(memcg) &&
5027 !page_counter_try_charge(&memcg->swap, nr_pages, &counter)) {
5028 memcg_memory_event(memcg, MEMCG_SWAP_MAX);
5029 memcg_memory_event(memcg, MEMCG_SWAP_FAIL);
5030 mem_cgroup_id_put(memcg);
5031 return -ENOMEM;
5032 }
5033
5034 /* Get references for the tail pages, too */
5035 if (nr_pages > 1)
5036 mem_cgroup_id_get_many(memcg, nr_pages - 1);
5037 oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg), nr_pages);
5038 VM_BUG_ON_FOLIO(oldid, folio);
5039 mod_memcg_state(memcg, MEMCG_SWAP, nr_pages);
5040
5041 return 0;
5042 }
5043
5044 /**
5045 * __mem_cgroup_uncharge_swap - uncharge swap space
5046 * @entry: swap entry to uncharge
5047 * @nr_pages: the amount of swap space to uncharge
5048 */
5049 void __mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages)
5050 {
5051 struct mem_cgroup *memcg;
5052 unsigned short id;
5053
5054 id = swap_cgroup_record(entry, 0, nr_pages);
5055 rcu_read_lock();
5056 memcg = mem_cgroup_from_id(id);
5057 if (memcg) {
5058 if (!mem_cgroup_is_root(memcg)) {
5059 if (do_memsw_account())
5060 page_counter_uncharge(&memcg->memsw, nr_pages);
5061 else
5062 page_counter_uncharge(&memcg->swap, nr_pages);
5063 }
5064 mod_memcg_state(memcg, MEMCG_SWAP, -nr_pages);
5065 mem_cgroup_id_put_many(memcg, nr_pages);
5066 }
5067 rcu_read_unlock();
5068 }
5069
5070 long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
5071 {
5072 long nr_swap_pages = get_nr_swap_pages();
5073
5074 if (mem_cgroup_disabled() || do_memsw_account())
5075 return nr_swap_pages;
5076 for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg))
5077 nr_swap_pages = min_t(long, nr_swap_pages,
5078 READ_ONCE(memcg->swap.max) -
5079 page_counter_read(&memcg->swap));
5080 return nr_swap_pages;
5081 }
5082
5083 bool mem_cgroup_swap_full(struct folio *folio)
5084 {
5085 struct mem_cgroup *memcg;
5086
5087 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
5088
5089 if (vm_swap_full())
5090 return true;
5091 if (do_memsw_account())
5092 return false;
5093
5094 memcg = folio_memcg(folio);
5095 if (!memcg)
5096 return false;
5097
5098 for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg)) {
5099 unsigned long usage = page_counter_read(&memcg->swap);
5100
5101 if (usage * 2 >= READ_ONCE(memcg->swap.high) ||
5102 usage * 2 >= READ_ONCE(memcg->swap.max))
5103 return true;
5104 }
5105
5106 return false;
5107 }
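
/*
 * Worked example for the check above (illustrative): with memory.swap.max
 * set to 262144 pages (1G with 4K pages) and swap.high left at "max", a
 * cgroup whose swap usage reaches 131072 pages satisfies usage * 2 >= max,
 * so its folios are treated as "swap full" and freed more aggressively,
 * mirroring the global vm_swap_full() heuristic.
 */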
5108
5109 static int __init setup_swap_account(char *s)
5110 {
5111 bool res;
5112
5113 if (!kstrtobool(s, &res) && !res)
5114 pr_warn_once("The swapaccount=0 commandline option is deprecated "
5115 "in favor of configuring swap control via cgroupfs. "
5116 "Please report your usecase to linux-mm@kvack.org if you "
5117 "depend on this functionality.\n");
5118 return 1;
5119 }
5120 __setup("swapaccount=", setup_swap_account);
5121
5122 static u64 swap_current_read(struct cgroup_subsys_state *css,
5123 struct cftype *cft)
5124 {
5125 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5126
5127 return (u64)page_counter_read(&memcg->swap) * PAGE_SIZE;
5128 }
5129
5130 static int swap_peak_show(struct seq_file *sf, void *v)
5131 {
5132 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(sf));
5133
5134 return peak_show(sf, v, &memcg->swap);
5135 }
5136
5137 static ssize_t swap_peak_write(struct kernfs_open_file *of, char *buf,
5138 size_t nbytes, loff_t off)
5139 {
5140 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
5141
5142 return peak_write(of, buf, nbytes, off, &memcg->swap,
5143 &memcg->swap_peaks);
5144 }
5145
5146 static int swap_high_show(struct seq_file *m, void *v)
5147 {
5148 return seq_puts_memcg_tunable(m,
5149 READ_ONCE(mem_cgroup_from_seq(m)->swap.high));
5150 }
5151
5152 static ssize_t swap_high_write(struct kernfs_open_file *of,
5153 char *buf, size_t nbytes, loff_t off)
5154 {
5155 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
5156 unsigned long high;
5157 int err;
5158
5159 buf = strstrip(buf);
5160 err = page_counter_memparse(buf, "max", &high);
5161 if (err)
5162 return err;
5163
5164 page_counter_set_high(&memcg->swap, high);
5165
5166 return nbytes;
5167 }
5168
5169 static int swap_max_show(struct seq_file *m, void *v)
5170 {
5171 return seq_puts_memcg_tunable(m,
5172 READ_ONCE(mem_cgroup_from_seq(m)->swap.max));
5173 }
5174
5175 static ssize_t swap_max_write(struct kernfs_open_file *of,
5176 char *buf, size_t nbytes, loff_t off)
5177 {
5178 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
5179 unsigned long max;
5180 int err;
5181
5182 buf = strstrip(buf);
5183 err = page_counter_memparse(buf, "max", &max);
5184 if (err)
5185 return err;
5186
5187 xchg(&memcg->swap.max, max);
5188
5189 return nbytes;
5190 }
5191
5192 static int swap_events_show(struct seq_file *m, void *v)
5193 {
5194 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
5195
5196 seq_printf(m, "high %lu\n",
5197 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_HIGH]));
5198 seq_printf(m, "max %lu\n",
5199 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_MAX]));
5200 seq_printf(m, "fail %lu\n",
5201 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_FAIL]));
5202
5203 return 0;
5204 }
5205
5206 static struct cftype swap_files[] = {
5207 {
5208 .name = "swap.current",
5209 .flags = CFTYPE_NOT_ON_ROOT,
5210 .read_u64 = swap_current_read,
5211 },
5212 {
5213 .name = "swap.high",
5214 .flags = CFTYPE_NOT_ON_ROOT,
5215 .seq_show = swap_high_show,
5216 .write = swap_high_write,
5217 },
5218 {
5219 .name = "swap.max",
5220 .flags = CFTYPE_NOT_ON_ROOT,
5221 .seq_show = swap_max_show,
5222 .write = swap_max_write,
5223 },
5224 {
5225 .name = "swap.peak",
5226 .flags = CFTYPE_NOT_ON_ROOT,
5227 .open = peak_open,
5228 .release = peak_release,
5229 .seq_show = swap_peak_show,
5230 .write = swap_peak_write,
5231 },
5232 {
5233 .name = "swap.events",
5234 .flags = CFTYPE_NOT_ON_ROOT,
5235 .file_offset = offsetof(struct mem_cgroup, swap_events_file),
5236 .seq_show = swap_events_show,
5237 },
5238 { } /* terminate */
5239 };
5240
5241 #ifdef CONFIG_ZSWAP
5242 /**
5243 * obj_cgroup_may_zswap - check if this cgroup can zswap
5244 * @objcg: the object cgroup
5245 *
5246 * Check if the hierarchical zswap limit has been reached.
5247 *
5248 * This doesn't check for specific headroom, and it is not atomic
5249 * either. But with zswap, the size of the allocation is only known
5250 * once compression has occurred, and this optimistic pre-check avoids
5251 * spending cycles on compression when there is already no room left
5252 * or zswap is disabled altogether somewhere in the hierarchy.
5253 */
5254 bool obj_cgroup_may_zswap(struct obj_cgroup *objcg)
5255 {
5256 struct mem_cgroup *memcg, *original_memcg;
5257 bool ret = true;
5258
5259 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
5260 return true;
5261
5262 original_memcg = get_mem_cgroup_from_objcg(objcg);
5263 for (memcg = original_memcg; !mem_cgroup_is_root(memcg);
5264 memcg = parent_mem_cgroup(memcg)) {
5265 unsigned long max = READ_ONCE(memcg->zswap_max);
5266 unsigned long pages;
5267
5268 if (max == PAGE_COUNTER_MAX)
5269 continue;
5270 if (max == 0) {
5271 ret = false;
5272 break;
5273 }
5274
5275 /*
5276 * mem_cgroup_flush_stats() ignores small changes. Use
5277 * do_flush_stats() directly to get accurate stats for charging.
5278 */
5279 do_flush_stats(memcg);
5280 pages = memcg_page_state(memcg, MEMCG_ZSWAP_B) / PAGE_SIZE;
5281 if (pages < max)
5282 continue;
5283 ret = false;
5284 break;
5285 }
5286 mem_cgroup_put(original_memcg);
5287 return ret;
5288 }

/**
 * obj_cgroup_charge_zswap - charge compression backend memory
 * @objcg: the object cgroup
 * @size: size of compressed object
 *
 * This forces the charge after obj_cgroup_may_zswap() allowed
 * compression and storage in zswap for this cgroup to go ahead.
 */
void obj_cgroup_charge_zswap(struct obj_cgroup *objcg, size_t size)
{
	struct mem_cgroup *memcg;

	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
		return;

	VM_WARN_ON_ONCE(!(current->flags & PF_MEMALLOC));

	/* PF_MEMALLOC context, charging must succeed */
	if (obj_cgroup_charge(objcg, GFP_KERNEL, size))
		VM_WARN_ON_ONCE(1);

	rcu_read_lock();
	memcg = obj_cgroup_memcg(objcg);
	mod_memcg_state(memcg, MEMCG_ZSWAP_B, size);
	mod_memcg_state(memcg, MEMCG_ZSWAPPED, 1);
	rcu_read_unlock();
}

/**
 * obj_cgroup_uncharge_zswap - uncharge compression backend memory
 * @objcg: the object cgroup
 * @size: size of compressed object
 *
 * Uncharges zswap memory on page in.
 */
void obj_cgroup_uncharge_zswap(struct obj_cgroup *objcg, size_t size)
{
	struct mem_cgroup *memcg;

	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
		return;

	obj_cgroup_uncharge(objcg, size);

	rcu_read_lock();
	memcg = obj_cgroup_memcg(objcg);
	mod_memcg_state(memcg, MEMCG_ZSWAP_B, -size);
	mod_memcg_state(memcg, MEMCG_ZSWAPPED, -1);
	rcu_read_unlock();
}

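/*
 * Writeback from zswap to the backing swap device is permitted only if
 * every cgroup from the given memcg up to the root has zswap writeback
 * enabled; a single ancestor with memory.zswap.writeback set to 0 vetoes it.
 */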
bool mem_cgroup_zswap_writeback_enabled(struct mem_cgroup *memcg)
{
	/* if zswap is disabled, do not block pages going to the swapping device */
	if (!zswap_is_enabled())
		return true;

	for (; memcg; memcg = parent_mem_cgroup(memcg))
		if (!READ_ONCE(memcg->zswap_writeback))
			return false;

	return true;
}

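/* memory.zswap.current: compressed bytes currently charged to the cgroup. */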
static u64 zswap_current_read(struct cgroup_subsys_state *css,
			      struct cftype *cft)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);

	mem_cgroup_flush_stats(memcg);
	return memcg_page_state(memcg, MEMCG_ZSWAP_B);
}

static int zswap_max_show(struct seq_file *m, void *v)
{
	return seq_puts_memcg_tunable(m,
		READ_ONCE(mem_cgroup_from_seq(m)->zswap_max));
}

static ssize_t zswap_max_write(struct kernfs_open_file *of,
			       char *buf, size_t nbytes, loff_t off)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
	unsigned long max;
	int err;

	buf = strstrip(buf);
	err = page_counter_memparse(buf, "max", &max);
	if (err)
		return err;

	xchg(&memcg->zswap_max, max);

	return nbytes;
}

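/*
 * memory.zswap.writeback: boolean (0/1) controlling whether this cgroup's
 * zswap entries may be written back to the backing swap device; see
 * mem_cgroup_zswap_writeback_enabled() above for the hierarchical check.
 */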
static int zswap_writeback_show(struct seq_file *m, void *v)
{
	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);

	seq_printf(m, "%d\n", READ_ONCE(memcg->zswap_writeback));
	return 0;
}

static ssize_t zswap_writeback_write(struct kernfs_open_file *of,
				     char *buf, size_t nbytes, loff_t off)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
	int zswap_writeback;
	ssize_t parse_ret = kstrtoint(strstrip(buf), 0, &zswap_writeback);

	if (parse_ret)
		return parse_ret;

	if (zswap_writeback != 0 && zswap_writeback != 1)
		return -EINVAL;

	WRITE_ONCE(memcg->zswap_writeback, zswap_writeback);
	return nbytes;
}

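/* Zswap control files, likewise registered on the default hierarchy. */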
static struct cftype zswap_files[] = {
	{
		.name = "zswap.current",
		.flags = CFTYPE_NOT_ON_ROOT,
		.read_u64 = zswap_current_read,
	},
	{
		.name = "zswap.max",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = zswap_max_show,
		.write = zswap_max_write,
	},
	{
		.name = "zswap.writeback",
		.seq_show = zswap_writeback_show,
		.write = zswap_writeback_write,
	},
	{ }	/* terminate */
};
#endif /* CONFIG_ZSWAP */

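/*
 * Register the cgroup v2 swap files, the legacy (v1) memsw files and, when
 * CONFIG_ZSWAP is enabled, the zswap files with the memory controller at
 * boot.
 */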
static int __init mem_cgroup_swap_init(void)
{
	if (mem_cgroup_disabled())
		return 0;

	WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys, swap_files));
#ifdef CONFIG_MEMCG_V1
	WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys, memsw_files));
#endif
#ifdef CONFIG_ZSWAP
	WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys, zswap_files));
#endif
	return 0;
}
subsys_initcall(mem_cgroup_swap_init);

#endif /* CONFIG_SWAP */