1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /* memcontrol.c - Memory Controller
3 *
4 * Copyright IBM Corporation, 2007
5 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
6 *
7 * Copyright 2007 OpenVZ SWsoft Inc
8 * Author: Pavel Emelianov <xemul@openvz.org>
9 *
10 * Memory thresholds
11 * Copyright (C) 2009 Nokia Corporation
12 * Author: Kirill A. Shutemov
13 *
14 * Kernel Memory Controller
15 * Copyright (C) 2012 Parallels Inc. and Google Inc.
16 * Authors: Glauber Costa and Suleiman Souhlal
17 *
18 * Native page reclaim
19 * Charge lifetime sanitation
20 * Lockless page tracking & accounting
21 * Unified hierarchy configuration model
22 * Copyright (C) 2015 Red Hat, Inc., Johannes Weiner
23 *
24 * Per memcg lru locking
25 * Copyright (C) 2020 Alibaba, Inc, Alex Shi
26 */
27
28 #include <linux/cgroup-defs.h>
29 #include <linux/page_counter.h>
30 #include <linux/memcontrol.h>
31 #include <linux/cgroup.h>
32 #include <linux/sched/mm.h>
33 #include <linux/shmem_fs.h>
34 #include <linux/hugetlb.h>
35 #include <linux/pagemap.h>
36 #include <linux/pagevec.h>
37 #include <linux/vm_event_item.h>
38 #include <linux/smp.h>
39 #include <linux/page-flags.h>
40 #include <linux/backing-dev.h>
41 #include <linux/bit_spinlock.h>
42 #include <linux/rcupdate.h>
43 #include <linux/limits.h>
44 #include <linux/export.h>
45 #include <linux/list.h>
46 #include <linux/mutex.h>
47 #include <linux/rbtree.h>
48 #include <linux/slab.h>
49 #include <linux/swapops.h>
50 #include <linux/spinlock.h>
51 #include <linux/fs.h>
52 #include <linux/seq_file.h>
53 #include <linux/parser.h>
54 #include <linux/vmpressure.h>
55 #include <linux/memremap.h>
56 #include <linux/mm_inline.h>
57 #include <linux/swap_cgroup.h>
58 #include <linux/cpu.h>
59 #include <linux/oom.h>
60 #include <linux/lockdep.h>
61 #include <linux/resume_user_mode.h>
62 #include <linux/psi.h>
63 #include <linux/seq_buf.h>
64 #include <linux/sched/isolation.h>
65 #include <linux/kmemleak.h>
66 #include "internal.h"
67 #include <net/sock.h>
68 #include <net/ip.h>
69 #include "slab.h"
70 #include "memcontrol-v1.h"
71
72 #include <linux/uaccess.h>
73
74 #define CREATE_TRACE_POINTS
75 #include <trace/events/memcg.h>
76 #undef CREATE_TRACE_POINTS
77
78 #include <trace/events/vmscan.h>
79
80 struct cgroup_subsys memory_cgrp_subsys __read_mostly;
81 EXPORT_SYMBOL(memory_cgrp_subsys);
82
83 struct mem_cgroup *root_mem_cgroup __read_mostly;
84
85 /* Active memory cgroup to use from an interrupt context */
86 DEFINE_PER_CPU(struct mem_cgroup *, int_active_memcg);
87 EXPORT_PER_CPU_SYMBOL_GPL(int_active_memcg);
88
89 /* Socket memory accounting disabled? */
90 static bool cgroup_memory_nosocket __ro_after_init;
91
92 /* Kernel memory accounting disabled? */
93 static bool cgroup_memory_nokmem __ro_after_init;
94
95 /* BPF memory accounting disabled? */
96 static bool cgroup_memory_nobpf __ro_after_init;
97
98 #ifdef CONFIG_CGROUP_WRITEBACK
99 static DECLARE_WAIT_QUEUE_HEAD(memcg_cgwb_frn_waitq);
100 #endif
101
task_is_dying(void)102 static inline bool task_is_dying(void)
103 {
104 return tsk_is_oom_victim(current) || fatal_signal_pending(current) ||
105 (current->flags & PF_EXITING);
106 }
107
108 /* Some nice accessors for the vmpressure. */
memcg_to_vmpressure(struct mem_cgroup * memcg)109 struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
110 {
111 if (!memcg)
112 memcg = root_mem_cgroup;
113 return &memcg->vmpressure;
114 }
115
vmpressure_to_memcg(struct vmpressure * vmpr)116 struct mem_cgroup *vmpressure_to_memcg(struct vmpressure *vmpr)
117 {
118 return container_of(vmpr, struct mem_cgroup, vmpressure);
119 }
120
121 #define SEQ_BUF_SIZE SZ_4K
122 #define CURRENT_OBJCG_UPDATE_BIT 0
123 #define CURRENT_OBJCG_UPDATE_FLAG (1UL << CURRENT_OBJCG_UPDATE_BIT)
124
125 static DEFINE_SPINLOCK(objcg_lock);
126
mem_cgroup_kmem_disabled(void)127 bool mem_cgroup_kmem_disabled(void)
128 {
129 return cgroup_memory_nokmem;
130 }
131
132 static void obj_cgroup_uncharge_pages(struct obj_cgroup *objcg,
133 unsigned int nr_pages);
134
obj_cgroup_release(struct percpu_ref * ref)135 static void obj_cgroup_release(struct percpu_ref *ref)
136 {
137 struct obj_cgroup *objcg = container_of(ref, struct obj_cgroup, refcnt);
138 unsigned int nr_bytes;
139 unsigned int nr_pages;
140 unsigned long flags;
141
142 /*
143 * At this point all allocated objects are freed, and
144 * objcg->nr_charged_bytes can't have an arbitrary byte value.
145 * However, it can be PAGE_SIZE or (x * PAGE_SIZE).
146 *
147 * The following sequence can lead to it:
148 * 1) CPU0: objcg == stock->cached_objcg
149 * 2) CPU1: we do a small allocation (e.g. 92 bytes),
150 * PAGE_SIZE bytes are charged
151 * 3) CPU1: a process from another memcg is allocating something,
152 * the stock if flushed,
153 * objcg->nr_charged_bytes = PAGE_SIZE - 92
154 * 5) CPU0: we do release this object,
155 * 92 bytes are added to stock->nr_bytes
156 * 6) CPU0: stock is flushed,
157 * 92 bytes are added to objcg->nr_charged_bytes
158 *
159 * In the result, nr_charged_bytes == PAGE_SIZE.
160 * This page will be uncharged in obj_cgroup_release().
161 */
162 nr_bytes = atomic_read(&objcg->nr_charged_bytes);
163 WARN_ON_ONCE(nr_bytes & (PAGE_SIZE - 1));
164 nr_pages = nr_bytes >> PAGE_SHIFT;
165
166 if (nr_pages)
167 obj_cgroup_uncharge_pages(objcg, nr_pages);
168
169 spin_lock_irqsave(&objcg_lock, flags);
170 list_del(&objcg->list);
171 spin_unlock_irqrestore(&objcg_lock, flags);
172
173 percpu_ref_exit(ref);
174 kfree_rcu(objcg, rcu);
175 }
176
obj_cgroup_alloc(void)177 static struct obj_cgroup *obj_cgroup_alloc(void)
178 {
179 struct obj_cgroup *objcg;
180 int ret;
181
182 objcg = kzalloc(sizeof(struct obj_cgroup), GFP_KERNEL);
183 if (!objcg)
184 return NULL;
185
186 ret = percpu_ref_init(&objcg->refcnt, obj_cgroup_release, 0,
187 GFP_KERNEL);
188 if (ret) {
189 kfree(objcg);
190 return NULL;
191 }
192 INIT_LIST_HEAD(&objcg->list);
193 return objcg;
194 }
195
memcg_reparent_objcgs(struct mem_cgroup * memcg,struct mem_cgroup * parent)196 static void memcg_reparent_objcgs(struct mem_cgroup *memcg,
197 struct mem_cgroup *parent)
198 {
199 struct obj_cgroup *objcg, *iter;
200
201 objcg = rcu_replace_pointer(memcg->objcg, NULL, true);
202
203 spin_lock_irq(&objcg_lock);
204
205 /* 1) Ready to reparent active objcg. */
206 list_add(&objcg->list, &memcg->objcg_list);
207 /* 2) Reparent active objcg and already reparented objcgs to parent. */
208 list_for_each_entry(iter, &memcg->objcg_list, list)
209 WRITE_ONCE(iter->memcg, parent);
210 /* 3) Move already reparented objcgs to the parent's list */
211 list_splice(&memcg->objcg_list, &parent->objcg_list);
212
213 spin_unlock_irq(&objcg_lock);
214
215 percpu_ref_kill(&objcg->refcnt);
216 }
217
218 /*
219 * A lot of the calls to the cache allocation functions are expected to be
220 * inlined by the compiler. Since the calls to memcg_slab_post_alloc_hook() are
221 * conditional to this static branch, we'll have to allow modules that does
222 * kmem_cache_alloc and the such to see this symbol as well
223 */
224 DEFINE_STATIC_KEY_FALSE(memcg_kmem_online_key);
225 EXPORT_SYMBOL(memcg_kmem_online_key);
226
227 DEFINE_STATIC_KEY_FALSE(memcg_bpf_enabled_key);
228 EXPORT_SYMBOL(memcg_bpf_enabled_key);
229
230 /**
231 * mem_cgroup_css_from_folio - css of the memcg associated with a folio
232 * @folio: folio of interest
233 *
234 * If memcg is bound to the default hierarchy, css of the memcg associated
235 * with @folio is returned. The returned css remains associated with @folio
236 * until it is released.
237 *
238 * If memcg is bound to a traditional hierarchy, the css of root_mem_cgroup
239 * is returned.
240 */
mem_cgroup_css_from_folio(struct folio * folio)241 struct cgroup_subsys_state *mem_cgroup_css_from_folio(struct folio *folio)
242 {
243 struct mem_cgroup *memcg = folio_memcg(folio);
244
245 if (!memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
246 memcg = root_mem_cgroup;
247
248 return &memcg->css;
249 }
250
251 /**
252 * page_cgroup_ino - return inode number of the memcg a page is charged to
253 * @page: the page
254 *
255 * Look up the closest online ancestor of the memory cgroup @page is charged to
256 * and return its inode number or 0 if @page is not charged to any cgroup. It
257 * is safe to call this function without holding a reference to @page.
258 *
259 * Note, this function is inherently racy, because there is nothing to prevent
260 * the cgroup inode from getting torn down and potentially reallocated a moment
261 * after page_cgroup_ino() returns, so it only should be used by callers that
262 * do not care (such as procfs interfaces).
263 */
page_cgroup_ino(struct page * page)264 ino_t page_cgroup_ino(struct page *page)
265 {
266 struct mem_cgroup *memcg;
267 unsigned long ino = 0;
268
269 rcu_read_lock();
270 /* page_folio() is racy here, but the entire function is racy anyway */
271 memcg = folio_memcg_check(page_folio(page));
272
273 while (memcg && !(memcg->css.flags & CSS_ONLINE))
274 memcg = parent_mem_cgroup(memcg);
275 if (memcg)
276 ino = cgroup_ino(memcg->css.cgroup);
277 rcu_read_unlock();
278 return ino;
279 }
280
281 /* Subset of node_stat_item for memcg stats */
282 static const unsigned int memcg_node_stat_items[] = {
283 NR_INACTIVE_ANON,
284 NR_ACTIVE_ANON,
285 NR_INACTIVE_FILE,
286 NR_ACTIVE_FILE,
287 NR_UNEVICTABLE,
288 NR_SLAB_RECLAIMABLE_B,
289 NR_SLAB_UNRECLAIMABLE_B,
290 WORKINGSET_REFAULT_ANON,
291 WORKINGSET_REFAULT_FILE,
292 WORKINGSET_ACTIVATE_ANON,
293 WORKINGSET_ACTIVATE_FILE,
294 WORKINGSET_RESTORE_ANON,
295 WORKINGSET_RESTORE_FILE,
296 WORKINGSET_NODERECLAIM,
297 NR_ANON_MAPPED,
298 NR_FILE_MAPPED,
299 NR_FILE_PAGES,
300 NR_FILE_DIRTY,
301 NR_WRITEBACK,
302 NR_SHMEM,
303 NR_SHMEM_THPS,
304 NR_FILE_THPS,
305 NR_ANON_THPS,
306 NR_KERNEL_STACK_KB,
307 NR_PAGETABLE,
308 NR_SECONDARY_PAGETABLE,
309 #ifdef CONFIG_SWAP
310 NR_SWAPCACHE,
311 #endif
312 #ifdef CONFIG_NUMA_BALANCING
313 PGPROMOTE_SUCCESS,
314 #endif
315 PGDEMOTE_KSWAPD,
316 PGDEMOTE_DIRECT,
317 PGDEMOTE_KHUGEPAGED,
318 PGDEMOTE_PROACTIVE,
319 #ifdef CONFIG_HUGETLB_PAGE
320 NR_HUGETLB,
321 #endif
322 };
323
324 static const unsigned int memcg_stat_items[] = {
325 MEMCG_SWAP,
326 MEMCG_SOCK,
327 MEMCG_PERCPU_B,
328 MEMCG_VMALLOC,
329 MEMCG_KMEM,
330 MEMCG_ZSWAP_B,
331 MEMCG_ZSWAPPED,
332 };
333
334 #define NR_MEMCG_NODE_STAT_ITEMS ARRAY_SIZE(memcg_node_stat_items)
335 #define MEMCG_VMSTAT_SIZE (NR_MEMCG_NODE_STAT_ITEMS + \
336 ARRAY_SIZE(memcg_stat_items))
337 #define BAD_STAT_IDX(index) ((u32)(index) >= U8_MAX)
338 static u8 mem_cgroup_stats_index[MEMCG_NR_STAT] __read_mostly;
339
init_memcg_stats(void)340 static void init_memcg_stats(void)
341 {
342 u8 i, j = 0;
343
344 BUILD_BUG_ON(MEMCG_NR_STAT >= U8_MAX);
345
346 memset(mem_cgroup_stats_index, U8_MAX, sizeof(mem_cgroup_stats_index));
347
348 for (i = 0; i < NR_MEMCG_NODE_STAT_ITEMS; ++i, ++j)
349 mem_cgroup_stats_index[memcg_node_stat_items[i]] = j;
350
351 for (i = 0; i < ARRAY_SIZE(memcg_stat_items); ++i, ++j)
352 mem_cgroup_stats_index[memcg_stat_items[i]] = j;
353 }
354
memcg_stats_index(int idx)355 static inline int memcg_stats_index(int idx)
356 {
357 return mem_cgroup_stats_index[idx];
358 }
359
360 struct lruvec_stats_percpu {
361 /* Local (CPU and cgroup) state */
362 long state[NR_MEMCG_NODE_STAT_ITEMS];
363
364 /* Delta calculation for lockless upward propagation */
365 long state_prev[NR_MEMCG_NODE_STAT_ITEMS];
366 };
367
368 struct lruvec_stats {
369 /* Aggregated (CPU and subtree) state */
370 long state[NR_MEMCG_NODE_STAT_ITEMS];
371
372 /* Non-hierarchical (CPU aggregated) state */
373 long state_local[NR_MEMCG_NODE_STAT_ITEMS];
374
375 /* Pending child counts during tree propagation */
376 long state_pending[NR_MEMCG_NODE_STAT_ITEMS];
377 };
378
lruvec_page_state(struct lruvec * lruvec,enum node_stat_item idx)379 unsigned long lruvec_page_state(struct lruvec *lruvec, enum node_stat_item idx)
380 {
381 struct mem_cgroup_per_node *pn;
382 long x;
383 int i;
384
385 if (mem_cgroup_disabled())
386 return node_page_state(lruvec_pgdat(lruvec), idx);
387
388 i = memcg_stats_index(idx);
389 if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx))
390 return 0;
391
392 pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
393 x = READ_ONCE(pn->lruvec_stats->state[i]);
394 #ifdef CONFIG_SMP
395 if (x < 0)
396 x = 0;
397 #endif
398 return x;
399 }
400
lruvec_page_state_local(struct lruvec * lruvec,enum node_stat_item idx)401 unsigned long lruvec_page_state_local(struct lruvec *lruvec,
402 enum node_stat_item idx)
403 {
404 struct mem_cgroup_per_node *pn;
405 long x;
406 int i;
407
408 if (mem_cgroup_disabled())
409 return node_page_state(lruvec_pgdat(lruvec), idx);
410
411 i = memcg_stats_index(idx);
412 if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx))
413 return 0;
414
415 pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
416 x = READ_ONCE(pn->lruvec_stats->state_local[i]);
417 #ifdef CONFIG_SMP
418 if (x < 0)
419 x = 0;
420 #endif
421 return x;
422 }
423
424 /* Subset of vm_event_item to report for memcg event stats */
425 static const unsigned int memcg_vm_event_stat[] = {
426 #ifdef CONFIG_MEMCG_V1
427 PGPGIN,
428 PGPGOUT,
429 #endif
430 PSWPIN,
431 PSWPOUT,
432 PGSCAN_KSWAPD,
433 PGSCAN_DIRECT,
434 PGSCAN_KHUGEPAGED,
435 PGSCAN_PROACTIVE,
436 PGSTEAL_KSWAPD,
437 PGSTEAL_DIRECT,
438 PGSTEAL_KHUGEPAGED,
439 PGSTEAL_PROACTIVE,
440 PGFAULT,
441 PGMAJFAULT,
442 PGREFILL,
443 PGACTIVATE,
444 PGDEACTIVATE,
445 PGLAZYFREE,
446 PGLAZYFREED,
447 #ifdef CONFIG_SWAP
448 SWPIN_ZERO,
449 SWPOUT_ZERO,
450 #endif
451 #ifdef CONFIG_ZSWAP
452 ZSWPIN,
453 ZSWPOUT,
454 ZSWPWB,
455 #endif
456 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
457 THP_FAULT_ALLOC,
458 THP_COLLAPSE_ALLOC,
459 THP_SWPOUT,
460 THP_SWPOUT_FALLBACK,
461 #endif
462 #ifdef CONFIG_NUMA_BALANCING
463 NUMA_PAGE_MIGRATE,
464 NUMA_PTE_UPDATES,
465 NUMA_HINT_FAULTS,
466 #endif
467 };
468
469 #define NR_MEMCG_EVENTS ARRAY_SIZE(memcg_vm_event_stat)
470 static u8 mem_cgroup_events_index[NR_VM_EVENT_ITEMS] __read_mostly;
471
init_memcg_events(void)472 static void init_memcg_events(void)
473 {
474 u8 i;
475
476 BUILD_BUG_ON(NR_VM_EVENT_ITEMS >= U8_MAX);
477
478 memset(mem_cgroup_events_index, U8_MAX,
479 sizeof(mem_cgroup_events_index));
480
481 for (i = 0; i < NR_MEMCG_EVENTS; ++i)
482 mem_cgroup_events_index[memcg_vm_event_stat[i]] = i;
483 }
484
memcg_events_index(enum vm_event_item idx)485 static inline int memcg_events_index(enum vm_event_item idx)
486 {
487 return mem_cgroup_events_index[idx];
488 }
489
490 struct memcg_vmstats_percpu {
491 /* Stats updates since the last flush */
492 unsigned int stats_updates;
493
494 /* Cached pointers for fast iteration in memcg_rstat_updated() */
495 struct memcg_vmstats_percpu *parent;
496 struct memcg_vmstats *vmstats;
497
498 /* The above should fit a single cacheline for memcg_rstat_updated() */
499
500 /* Local (CPU and cgroup) page state & events */
501 long state[MEMCG_VMSTAT_SIZE];
502 unsigned long events[NR_MEMCG_EVENTS];
503
504 /* Delta calculation for lockless upward propagation */
505 long state_prev[MEMCG_VMSTAT_SIZE];
506 unsigned long events_prev[NR_MEMCG_EVENTS];
507 } ____cacheline_aligned;
508
509 struct memcg_vmstats {
510 /* Aggregated (CPU and subtree) page state & events */
511 long state[MEMCG_VMSTAT_SIZE];
512 unsigned long events[NR_MEMCG_EVENTS];
513
514 /* Non-hierarchical (CPU aggregated) page state & events */
515 long state_local[MEMCG_VMSTAT_SIZE];
516 unsigned long events_local[NR_MEMCG_EVENTS];
517
518 /* Pending child counts during tree propagation */
519 long state_pending[MEMCG_VMSTAT_SIZE];
520 unsigned long events_pending[NR_MEMCG_EVENTS];
521
522 /* Stats updates since the last flush */
523 atomic64_t stats_updates;
524 };
525
526 /*
527 * memcg and lruvec stats flushing
528 *
529 * Many codepaths leading to stats update or read are performance sensitive and
530 * adding stats flushing in such codepaths is not desirable. So, to optimize the
531 * flushing the kernel does:
532 *
533 * 1) Periodically and asynchronously flush the stats every 2 seconds to not let
534 * rstat update tree grow unbounded.
535 *
536 * 2) Flush the stats synchronously on reader side only when there are more than
537 * (MEMCG_CHARGE_BATCH * nr_cpus) update events. Though this optimization
538 * will let stats be out of sync by atmost (MEMCG_CHARGE_BATCH * nr_cpus) but
539 * only for 2 seconds due to (1).
540 */
541 static void flush_memcg_stats_dwork(struct work_struct *w);
542 static DECLARE_DEFERRABLE_WORK(stats_flush_dwork, flush_memcg_stats_dwork);
543 static u64 flush_last_time;
544
545 #define FLUSH_TIME (2UL*HZ)
546
547 /*
548 * Accessors to ensure that preemption is disabled on PREEMPT_RT because it can
549 * not rely on this as part of an acquired spinlock_t lock. These functions are
550 * never used in hardirq context on PREEMPT_RT and therefore disabling preemtion
551 * is sufficient.
552 */
memcg_stats_lock(void)553 static void memcg_stats_lock(void)
554 {
555 preempt_disable_nested();
556 VM_WARN_ON_IRQS_ENABLED();
557 }
558
__memcg_stats_lock(void)559 static void __memcg_stats_lock(void)
560 {
561 preempt_disable_nested();
562 }
563
memcg_stats_unlock(void)564 static void memcg_stats_unlock(void)
565 {
566 preempt_enable_nested();
567 }
568
569
memcg_vmstats_needs_flush(struct memcg_vmstats * vmstats)570 static bool memcg_vmstats_needs_flush(struct memcg_vmstats *vmstats)
571 {
572 return atomic64_read(&vmstats->stats_updates) >
573 MEMCG_CHARGE_BATCH * num_online_cpus();
574 }
575
memcg_rstat_updated(struct mem_cgroup * memcg,int val)576 static inline void memcg_rstat_updated(struct mem_cgroup *memcg, int val)
577 {
578 struct memcg_vmstats_percpu *statc;
579 int cpu = smp_processor_id();
580 unsigned int stats_updates;
581
582 if (!val)
583 return;
584
585 cgroup_rstat_updated(memcg->css.cgroup, cpu);
586 statc = this_cpu_ptr(memcg->vmstats_percpu);
587 for (; statc; statc = statc->parent) {
588 stats_updates = READ_ONCE(statc->stats_updates) + abs(val);
589 WRITE_ONCE(statc->stats_updates, stats_updates);
590 if (stats_updates < MEMCG_CHARGE_BATCH)
591 continue;
592
593 /*
594 * If @memcg is already flush-able, increasing stats_updates is
595 * redundant. Avoid the overhead of the atomic update.
596 */
597 if (!memcg_vmstats_needs_flush(statc->vmstats))
598 atomic64_add(stats_updates,
599 &statc->vmstats->stats_updates);
600 WRITE_ONCE(statc->stats_updates, 0);
601 }
602 }
603
__mem_cgroup_flush_stats(struct mem_cgroup * memcg,bool force)604 static void __mem_cgroup_flush_stats(struct mem_cgroup *memcg, bool force)
605 {
606 bool needs_flush = memcg_vmstats_needs_flush(memcg->vmstats);
607
608 trace_memcg_flush_stats(memcg, atomic64_read(&memcg->vmstats->stats_updates),
609 force, needs_flush);
610
611 if (!force && !needs_flush)
612 return;
613
614 if (mem_cgroup_is_root(memcg))
615 WRITE_ONCE(flush_last_time, jiffies_64);
616
617 cgroup_rstat_flush(memcg->css.cgroup);
618 }
619
620 /*
621 * mem_cgroup_flush_stats - flush the stats of a memory cgroup subtree
622 * @memcg: root of the subtree to flush
623 *
624 * Flushing is serialized by the underlying global rstat lock. There is also a
625 * minimum amount of work to be done even if there are no stat updates to flush.
626 * Hence, we only flush the stats if the updates delta exceeds a threshold. This
627 * avoids unnecessary work and contention on the underlying lock.
628 */
mem_cgroup_flush_stats(struct mem_cgroup * memcg)629 void mem_cgroup_flush_stats(struct mem_cgroup *memcg)
630 {
631 if (mem_cgroup_disabled())
632 return;
633
634 if (!memcg)
635 memcg = root_mem_cgroup;
636
637 __mem_cgroup_flush_stats(memcg, false);
638 }
639
mem_cgroup_flush_stats_ratelimited(struct mem_cgroup * memcg)640 void mem_cgroup_flush_stats_ratelimited(struct mem_cgroup *memcg)
641 {
642 /* Only flush if the periodic flusher is one full cycle late */
643 if (time_after64(jiffies_64, READ_ONCE(flush_last_time) + 2*FLUSH_TIME))
644 mem_cgroup_flush_stats(memcg);
645 }
646
flush_memcg_stats_dwork(struct work_struct * w)647 static void flush_memcg_stats_dwork(struct work_struct *w)
648 {
649 /*
650 * Deliberately ignore memcg_vmstats_needs_flush() here so that flushing
651 * in latency-sensitive paths is as cheap as possible.
652 */
653 __mem_cgroup_flush_stats(root_mem_cgroup, true);
654 queue_delayed_work(system_unbound_wq, &stats_flush_dwork, FLUSH_TIME);
655 }
656
memcg_page_state(struct mem_cgroup * memcg,int idx)657 unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
658 {
659 long x;
660 int i = memcg_stats_index(idx);
661
662 if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx))
663 return 0;
664
665 x = READ_ONCE(memcg->vmstats->state[i]);
666 #ifdef CONFIG_SMP
667 if (x < 0)
668 x = 0;
669 #endif
670 return x;
671 }
672
673 static int memcg_page_state_unit(int item);
674
675 /*
676 * Normalize the value passed into memcg_rstat_updated() to be in pages. Round
677 * up non-zero sub-page updates to 1 page as zero page updates are ignored.
678 */
memcg_state_val_in_pages(int idx,int val)679 static int memcg_state_val_in_pages(int idx, int val)
680 {
681 int unit = memcg_page_state_unit(idx);
682
683 if (!val || unit == PAGE_SIZE)
684 return val;
685 else
686 return max(val * unit / PAGE_SIZE, 1UL);
687 }
688
689 /**
690 * __mod_memcg_state - update cgroup memory statistics
691 * @memcg: the memory cgroup
692 * @idx: the stat item - can be enum memcg_stat_item or enum node_stat_item
693 * @val: delta to add to the counter, can be negative
694 */
__mod_memcg_state(struct mem_cgroup * memcg,enum memcg_stat_item idx,int val)695 void __mod_memcg_state(struct mem_cgroup *memcg, enum memcg_stat_item idx,
696 int val)
697 {
698 int i = memcg_stats_index(idx);
699
700 if (mem_cgroup_disabled())
701 return;
702
703 if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx))
704 return;
705
706 __this_cpu_add(memcg->vmstats_percpu->state[i], val);
707 val = memcg_state_val_in_pages(idx, val);
708 memcg_rstat_updated(memcg, val);
709 trace_mod_memcg_state(memcg, idx, val);
710 }
711
712 #ifdef CONFIG_MEMCG_V1
713 /* idx can be of type enum memcg_stat_item or node_stat_item. */
memcg_page_state_local(struct mem_cgroup * memcg,int idx)714 unsigned long memcg_page_state_local(struct mem_cgroup *memcg, int idx)
715 {
716 long x;
717 int i = memcg_stats_index(idx);
718
719 if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx))
720 return 0;
721
722 x = READ_ONCE(memcg->vmstats->state_local[i]);
723 #ifdef CONFIG_SMP
724 if (x < 0)
725 x = 0;
726 #endif
727 return x;
728 }
729 #endif
730
__mod_memcg_lruvec_state(struct lruvec * lruvec,enum node_stat_item idx,int val)731 static void __mod_memcg_lruvec_state(struct lruvec *lruvec,
732 enum node_stat_item idx,
733 int val)
734 {
735 struct mem_cgroup_per_node *pn;
736 struct mem_cgroup *memcg;
737 int i = memcg_stats_index(idx);
738
739 if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx))
740 return;
741
742 pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
743 memcg = pn->memcg;
744
745 /*
746 * The caller from rmap relies on disabled preemption because they never
747 * update their counter from in-interrupt context. For these two
748 * counters we check that the update is never performed from an
749 * interrupt context while other caller need to have disabled interrupt.
750 */
751 __memcg_stats_lock();
752 if (IS_ENABLED(CONFIG_DEBUG_VM)) {
753 switch (idx) {
754 case NR_ANON_MAPPED:
755 case NR_FILE_MAPPED:
756 case NR_ANON_THPS:
757 WARN_ON_ONCE(!in_task());
758 break;
759 default:
760 VM_WARN_ON_IRQS_ENABLED();
761 }
762 }
763
764 /* Update memcg */
765 __this_cpu_add(memcg->vmstats_percpu->state[i], val);
766
767 /* Update lruvec */
768 __this_cpu_add(pn->lruvec_stats_percpu->state[i], val);
769
770 val = memcg_state_val_in_pages(idx, val);
771 memcg_rstat_updated(memcg, val);
772 trace_mod_memcg_lruvec_state(memcg, idx, val);
773 memcg_stats_unlock();
774 }
775
776 /**
777 * __mod_lruvec_state - update lruvec memory statistics
778 * @lruvec: the lruvec
779 * @idx: the stat item
780 * @val: delta to add to the counter, can be negative
781 *
782 * The lruvec is the intersection of the NUMA node and a cgroup. This
783 * function updates the all three counters that are affected by a
784 * change of state at this level: per-node, per-cgroup, per-lruvec.
785 */
__mod_lruvec_state(struct lruvec * lruvec,enum node_stat_item idx,int val)786 void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
787 int val)
788 {
789 /* Update node */
790 __mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
791
792 /* Update memcg and lruvec */
793 if (!mem_cgroup_disabled())
794 __mod_memcg_lruvec_state(lruvec, idx, val);
795 }
796
__lruvec_stat_mod_folio(struct folio * folio,enum node_stat_item idx,int val)797 void __lruvec_stat_mod_folio(struct folio *folio, enum node_stat_item idx,
798 int val)
799 {
800 struct mem_cgroup *memcg;
801 pg_data_t *pgdat = folio_pgdat(folio);
802 struct lruvec *lruvec;
803
804 rcu_read_lock();
805 memcg = folio_memcg(folio);
806 /* Untracked pages have no memcg, no lruvec. Update only the node */
807 if (!memcg) {
808 rcu_read_unlock();
809 __mod_node_page_state(pgdat, idx, val);
810 return;
811 }
812
813 lruvec = mem_cgroup_lruvec(memcg, pgdat);
814 __mod_lruvec_state(lruvec, idx, val);
815 rcu_read_unlock();
816 }
817 EXPORT_SYMBOL(__lruvec_stat_mod_folio);
818
__mod_lruvec_kmem_state(void * p,enum node_stat_item idx,int val)819 void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val)
820 {
821 pg_data_t *pgdat = page_pgdat(virt_to_page(p));
822 struct mem_cgroup *memcg;
823 struct lruvec *lruvec;
824
825 rcu_read_lock();
826 memcg = mem_cgroup_from_slab_obj(p);
827
828 /*
829 * Untracked pages have no memcg, no lruvec. Update only the
830 * node. If we reparent the slab objects to the root memcg,
831 * when we free the slab object, we need to update the per-memcg
832 * vmstats to keep it correct for the root memcg.
833 */
834 if (!memcg) {
835 __mod_node_page_state(pgdat, idx, val);
836 } else {
837 lruvec = mem_cgroup_lruvec(memcg, pgdat);
838 __mod_lruvec_state(lruvec, idx, val);
839 }
840 rcu_read_unlock();
841 }
842
843 /**
844 * __count_memcg_events - account VM events in a cgroup
845 * @memcg: the memory cgroup
846 * @idx: the event item
847 * @count: the number of events that occurred
848 */
__count_memcg_events(struct mem_cgroup * memcg,enum vm_event_item idx,unsigned long count)849 void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
850 unsigned long count)
851 {
852 int i = memcg_events_index(idx);
853
854 if (mem_cgroup_disabled())
855 return;
856
857 if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx))
858 return;
859
860 memcg_stats_lock();
861 __this_cpu_add(memcg->vmstats_percpu->events[i], count);
862 memcg_rstat_updated(memcg, count);
863 trace_count_memcg_events(memcg, idx, count);
864 memcg_stats_unlock();
865 }
866
memcg_events(struct mem_cgroup * memcg,int event)867 unsigned long memcg_events(struct mem_cgroup *memcg, int event)
868 {
869 int i = memcg_events_index(event);
870
871 if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, event))
872 return 0;
873
874 return READ_ONCE(memcg->vmstats->events[i]);
875 }
876
877 #ifdef CONFIG_MEMCG_V1
memcg_events_local(struct mem_cgroup * memcg,int event)878 unsigned long memcg_events_local(struct mem_cgroup *memcg, int event)
879 {
880 int i = memcg_events_index(event);
881
882 if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, event))
883 return 0;
884
885 return READ_ONCE(memcg->vmstats->events_local[i]);
886 }
887 #endif
888
mem_cgroup_from_task(struct task_struct * p)889 struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
890 {
891 /*
892 * mm_update_next_owner() may clear mm->owner to NULL
893 * if it races with swapoff, page migration, etc.
894 * So this can be called with p == NULL.
895 */
896 if (unlikely(!p))
897 return NULL;
898
899 return mem_cgroup_from_css(task_css(p, memory_cgrp_id));
900 }
901 EXPORT_SYMBOL(mem_cgroup_from_task);
902
active_memcg(void)903 static __always_inline struct mem_cgroup *active_memcg(void)
904 {
905 if (!in_task())
906 return this_cpu_read(int_active_memcg);
907 else
908 return current->active_memcg;
909 }
910
911 /**
912 * get_mem_cgroup_from_mm: Obtain a reference on given mm_struct's memcg.
913 * @mm: mm from which memcg should be extracted. It can be NULL.
914 *
915 * Obtain a reference on mm->memcg and returns it if successful. If mm
916 * is NULL, then the memcg is chosen as follows:
917 * 1) The active memcg, if set.
918 * 2) current->mm->memcg, if available
919 * 3) root memcg
920 * If mem_cgroup is disabled, NULL is returned.
921 */
get_mem_cgroup_from_mm(struct mm_struct * mm)922 struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
923 {
924 struct mem_cgroup *memcg;
925
926 if (mem_cgroup_disabled())
927 return NULL;
928
929 /*
930 * Page cache insertions can happen without an
931 * actual mm context, e.g. during disk probing
932 * on boot, loopback IO, acct() writes etc.
933 *
934 * No need to css_get on root memcg as the reference
935 * counting is disabled on the root level in the
936 * cgroup core. See CSS_NO_REF.
937 */
938 if (unlikely(!mm)) {
939 memcg = active_memcg();
940 if (unlikely(memcg)) {
941 /* remote memcg must hold a ref */
942 css_get(&memcg->css);
943 return memcg;
944 }
945 mm = current->mm;
946 if (unlikely(!mm))
947 return root_mem_cgroup;
948 }
949
950 rcu_read_lock();
951 do {
952 memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
953 if (unlikely(!memcg))
954 memcg = root_mem_cgroup;
955 } while (!css_tryget(&memcg->css));
956 rcu_read_unlock();
957 return memcg;
958 }
959 EXPORT_SYMBOL(get_mem_cgroup_from_mm);
960
961 /**
962 * get_mem_cgroup_from_current - Obtain a reference on current task's memcg.
963 */
get_mem_cgroup_from_current(void)964 struct mem_cgroup *get_mem_cgroup_from_current(void)
965 {
966 struct mem_cgroup *memcg;
967
968 if (mem_cgroup_disabled())
969 return NULL;
970
971 again:
972 rcu_read_lock();
973 memcg = mem_cgroup_from_task(current);
974 if (!css_tryget(&memcg->css)) {
975 rcu_read_unlock();
976 goto again;
977 }
978 rcu_read_unlock();
979 return memcg;
980 }
981
982 /**
983 * get_mem_cgroup_from_folio - Obtain a reference on a given folio's memcg.
984 * @folio: folio from which memcg should be extracted.
985 */
get_mem_cgroup_from_folio(struct folio * folio)986 struct mem_cgroup *get_mem_cgroup_from_folio(struct folio *folio)
987 {
988 struct mem_cgroup *memcg = folio_memcg(folio);
989
990 if (mem_cgroup_disabled())
991 return NULL;
992
993 rcu_read_lock();
994 if (!memcg || WARN_ON_ONCE(!css_tryget(&memcg->css)))
995 memcg = root_mem_cgroup;
996 rcu_read_unlock();
997 return memcg;
998 }
999
1000 /**
1001 * mem_cgroup_iter - iterate over memory cgroup hierarchy
1002 * @root: hierarchy root
1003 * @prev: previously returned memcg, NULL on first invocation
1004 * @reclaim: cookie for shared reclaim walks, NULL for full walks
1005 *
1006 * Returns references to children of the hierarchy below @root, or
1007 * @root itself, or %NULL after a full round-trip.
1008 *
1009 * Caller must pass the return value in @prev on subsequent
1010 * invocations for reference counting, or use mem_cgroup_iter_break()
1011 * to cancel a hierarchy walk before the round-trip is complete.
1012 *
1013 * Reclaimers can specify a node in @reclaim to divide up the memcgs
1014 * in the hierarchy among all concurrent reclaimers operating on the
1015 * same node.
1016 */
mem_cgroup_iter(struct mem_cgroup * root,struct mem_cgroup * prev,struct mem_cgroup_reclaim_cookie * reclaim)1017 struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
1018 struct mem_cgroup *prev,
1019 struct mem_cgroup_reclaim_cookie *reclaim)
1020 {
1021 struct mem_cgroup_reclaim_iter *iter;
1022 struct cgroup_subsys_state *css;
1023 struct mem_cgroup *pos;
1024 struct mem_cgroup *next;
1025
1026 if (mem_cgroup_disabled())
1027 return NULL;
1028
1029 if (!root)
1030 root = root_mem_cgroup;
1031
1032 rcu_read_lock();
1033 restart:
1034 next = NULL;
1035
1036 if (reclaim) {
1037 int gen;
1038 int nid = reclaim->pgdat->node_id;
1039
1040 iter = &root->nodeinfo[nid]->iter;
1041 gen = atomic_read(&iter->generation);
1042
1043 /*
1044 * On start, join the current reclaim iteration cycle.
1045 * Exit when a concurrent walker completes it.
1046 */
1047 if (!prev)
1048 reclaim->generation = gen;
1049 else if (reclaim->generation != gen)
1050 goto out_unlock;
1051
1052 pos = READ_ONCE(iter->position);
1053 } else
1054 pos = prev;
1055
1056 css = pos ? &pos->css : NULL;
1057
1058 while ((css = css_next_descendant_pre(css, &root->css))) {
1059 /*
1060 * Verify the css and acquire a reference. The root
1061 * is provided by the caller, so we know it's alive
1062 * and kicking, and don't take an extra reference.
1063 */
1064 if (css == &root->css || css_tryget(css))
1065 break;
1066 }
1067
1068 next = mem_cgroup_from_css(css);
1069
1070 if (reclaim) {
1071 /*
1072 * The position could have already been updated by a competing
1073 * thread, so check that the value hasn't changed since we read
1074 * it to avoid reclaiming from the same cgroup twice.
1075 */
1076 if (cmpxchg(&iter->position, pos, next) != pos) {
1077 if (css && css != &root->css)
1078 css_put(css);
1079 goto restart;
1080 }
1081
1082 if (!next) {
1083 atomic_inc(&iter->generation);
1084
1085 /*
1086 * Reclaimers share the hierarchy walk, and a
1087 * new one might jump in right at the end of
1088 * the hierarchy - make sure they see at least
1089 * one group and restart from the beginning.
1090 */
1091 if (!prev)
1092 goto restart;
1093 }
1094 }
1095
1096 out_unlock:
1097 rcu_read_unlock();
1098 if (prev && prev != root)
1099 css_put(&prev->css);
1100
1101 return next;
1102 }
1103
1104 /**
1105 * mem_cgroup_iter_break - abort a hierarchy walk prematurely
1106 * @root: hierarchy root
1107 * @prev: last visited hierarchy member as returned by mem_cgroup_iter()
1108 */
mem_cgroup_iter_break(struct mem_cgroup * root,struct mem_cgroup * prev)1109 void mem_cgroup_iter_break(struct mem_cgroup *root,
1110 struct mem_cgroup *prev)
1111 {
1112 if (!root)
1113 root = root_mem_cgroup;
1114 if (prev && prev != root)
1115 css_put(&prev->css);
1116 }
1117
__invalidate_reclaim_iterators(struct mem_cgroup * from,struct mem_cgroup * dead_memcg)1118 static void __invalidate_reclaim_iterators(struct mem_cgroup *from,
1119 struct mem_cgroup *dead_memcg)
1120 {
1121 struct mem_cgroup_reclaim_iter *iter;
1122 struct mem_cgroup_per_node *mz;
1123 int nid;
1124
1125 for_each_node(nid) {
1126 mz = from->nodeinfo[nid];
1127 iter = &mz->iter;
1128 cmpxchg(&iter->position, dead_memcg, NULL);
1129 }
1130 }
1131
invalidate_reclaim_iterators(struct mem_cgroup * dead_memcg)1132 static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
1133 {
1134 struct mem_cgroup *memcg = dead_memcg;
1135 struct mem_cgroup *last;
1136
1137 do {
1138 __invalidate_reclaim_iterators(memcg, dead_memcg);
1139 last = memcg;
1140 } while ((memcg = parent_mem_cgroup(memcg)));
1141
1142 /*
1143 * When cgroup1 non-hierarchy mode is used,
1144 * parent_mem_cgroup() does not walk all the way up to the
1145 * cgroup root (root_mem_cgroup). So we have to handle
1146 * dead_memcg from cgroup root separately.
1147 */
1148 if (!mem_cgroup_is_root(last))
1149 __invalidate_reclaim_iterators(root_mem_cgroup,
1150 dead_memcg);
1151 }
1152
1153 /**
1154 * mem_cgroup_scan_tasks - iterate over tasks of a memory cgroup hierarchy
1155 * @memcg: hierarchy root
1156 * @fn: function to call for each task
1157 * @arg: argument passed to @fn
1158 *
1159 * This function iterates over tasks attached to @memcg or to any of its
1160 * descendants and calls @fn for each task. If @fn returns a non-zero
1161 * value, the function breaks the iteration loop. Otherwise, it will iterate
1162 * over all tasks and return 0.
1163 *
1164 * This function must not be called for the root memory cgroup.
1165 */
mem_cgroup_scan_tasks(struct mem_cgroup * memcg,int (* fn)(struct task_struct *,void *),void * arg)1166 void mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
1167 int (*fn)(struct task_struct *, void *), void *arg)
1168 {
1169 struct mem_cgroup *iter;
1170 int ret = 0;
1171 int i = 0;
1172
1173 BUG_ON(mem_cgroup_is_root(memcg));
1174
1175 for_each_mem_cgroup_tree(iter, memcg) {
1176 struct css_task_iter it;
1177 struct task_struct *task;
1178
1179 css_task_iter_start(&iter->css, CSS_TASK_ITER_PROCS, &it);
1180 while (!ret && (task = css_task_iter_next(&it))) {
1181 /* Avoid potential softlockup warning */
1182 if ((++i & 1023) == 0)
1183 cond_resched();
1184 ret = fn(task, arg);
1185 }
1186 css_task_iter_end(&it);
1187 if (ret) {
1188 mem_cgroup_iter_break(memcg, iter);
1189 break;
1190 }
1191 }
1192 }
1193
1194 #ifdef CONFIG_DEBUG_VM
lruvec_memcg_debug(struct lruvec * lruvec,struct folio * folio)1195 void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio)
1196 {
1197 struct mem_cgroup *memcg;
1198
1199 if (mem_cgroup_disabled())
1200 return;
1201
1202 memcg = folio_memcg(folio);
1203
1204 if (!memcg)
1205 VM_BUG_ON_FOLIO(!mem_cgroup_is_root(lruvec_memcg(lruvec)), folio);
1206 else
1207 VM_BUG_ON_FOLIO(lruvec_memcg(lruvec) != memcg, folio);
1208 }
1209 #endif
1210
1211 /**
1212 * folio_lruvec_lock - Lock the lruvec for a folio.
1213 * @folio: Pointer to the folio.
1214 *
1215 * These functions are safe to use under any of the following conditions:
1216 * - folio locked
1217 * - folio_test_lru false
1218 * - folio frozen (refcount of 0)
1219 *
1220 * Return: The lruvec this folio is on with its lock held.
1221 */
folio_lruvec_lock(struct folio * folio)1222 struct lruvec *folio_lruvec_lock(struct folio *folio)
1223 {
1224 struct lruvec *lruvec = folio_lruvec(folio);
1225
1226 spin_lock(&lruvec->lru_lock);
1227 lruvec_memcg_debug(lruvec, folio);
1228
1229 return lruvec;
1230 }
1231
1232 /**
1233 * folio_lruvec_lock_irq - Lock the lruvec for a folio.
1234 * @folio: Pointer to the folio.
1235 *
1236 * These functions are safe to use under any of the following conditions:
1237 * - folio locked
1238 * - folio_test_lru false
1239 * - folio frozen (refcount of 0)
1240 *
1241 * Return: The lruvec this folio is on with its lock held and interrupts
1242 * disabled.
1243 */
folio_lruvec_lock_irq(struct folio * folio)1244 struct lruvec *folio_lruvec_lock_irq(struct folio *folio)
1245 {
1246 struct lruvec *lruvec = folio_lruvec(folio);
1247
1248 spin_lock_irq(&lruvec->lru_lock);
1249 lruvec_memcg_debug(lruvec, folio);
1250
1251 return lruvec;
1252 }
1253
1254 /**
1255 * folio_lruvec_lock_irqsave - Lock the lruvec for a folio.
1256 * @folio: Pointer to the folio.
1257 * @flags: Pointer to irqsave flags.
1258 *
1259 * These functions are safe to use under any of the following conditions:
1260 * - folio locked
1261 * - folio_test_lru false
1262 * - folio frozen (refcount of 0)
1263 *
1264 * Return: The lruvec this folio is on with its lock held and interrupts
1265 * disabled.
1266 */
folio_lruvec_lock_irqsave(struct folio * folio,unsigned long * flags)1267 struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio,
1268 unsigned long *flags)
1269 {
1270 struct lruvec *lruvec = folio_lruvec(folio);
1271
1272 spin_lock_irqsave(&lruvec->lru_lock, *flags);
1273 lruvec_memcg_debug(lruvec, folio);
1274
1275 return lruvec;
1276 }
1277
1278 /**
1279 * mem_cgroup_update_lru_size - account for adding or removing an lru page
1280 * @lruvec: mem_cgroup per zone lru vector
1281 * @lru: index of lru list the page is sitting on
1282 * @zid: zone id of the accounted pages
1283 * @nr_pages: positive when adding or negative when removing
1284 *
1285 * This function must be called under lru_lock, just before a page is added
1286 * to or just after a page is removed from an lru list.
1287 */
mem_cgroup_update_lru_size(struct lruvec * lruvec,enum lru_list lru,int zid,int nr_pages)1288 void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
1289 int zid, int nr_pages)
1290 {
1291 struct mem_cgroup_per_node *mz;
1292 unsigned long *lru_size;
1293 long size;
1294
1295 if (mem_cgroup_disabled())
1296 return;
1297
1298 mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
1299 lru_size = &mz->lru_zone_size[zid][lru];
1300
1301 if (nr_pages < 0)
1302 *lru_size += nr_pages;
1303
1304 size = *lru_size;
1305 if (WARN_ONCE(size < 0,
1306 "%s(%p, %d, %d): lru_size %ld\n",
1307 __func__, lruvec, lru, nr_pages, size)) {
1308 VM_BUG_ON(1);
1309 *lru_size = 0;
1310 }
1311
1312 if (nr_pages > 0)
1313 *lru_size += nr_pages;
1314 }
1315
1316 /**
1317 * mem_cgroup_margin - calculate chargeable space of a memory cgroup
1318 * @memcg: the memory cgroup
1319 *
1320 * Returns the maximum amount of memory @mem can be charged with, in
1321 * pages.
1322 */
mem_cgroup_margin(struct mem_cgroup * memcg)1323 static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
1324 {
1325 unsigned long margin = 0;
1326 unsigned long count;
1327 unsigned long limit;
1328
1329 count = page_counter_read(&memcg->memory);
1330 limit = READ_ONCE(memcg->memory.max);
1331 if (count < limit)
1332 margin = limit - count;
1333
1334 if (do_memsw_account()) {
1335 count = page_counter_read(&memcg->memsw);
1336 limit = READ_ONCE(memcg->memsw.max);
1337 if (count < limit)
1338 margin = min(margin, limit - count);
1339 else
1340 margin = 0;
1341 }
1342
1343 return margin;
1344 }
1345
1346 struct memory_stat {
1347 const char *name;
1348 unsigned int idx;
1349 };
1350
1351 static const struct memory_stat memory_stats[] = {
1352 { "anon", NR_ANON_MAPPED },
1353 { "file", NR_FILE_PAGES },
1354 { "kernel", MEMCG_KMEM },
1355 { "kernel_stack", NR_KERNEL_STACK_KB },
1356 { "pagetables", NR_PAGETABLE },
1357 { "sec_pagetables", NR_SECONDARY_PAGETABLE },
1358 { "percpu", MEMCG_PERCPU_B },
1359 { "sock", MEMCG_SOCK },
1360 { "vmalloc", MEMCG_VMALLOC },
1361 { "shmem", NR_SHMEM },
1362 #ifdef CONFIG_ZSWAP
1363 { "zswap", MEMCG_ZSWAP_B },
1364 { "zswapped", MEMCG_ZSWAPPED },
1365 #endif
1366 { "file_mapped", NR_FILE_MAPPED },
1367 { "file_dirty", NR_FILE_DIRTY },
1368 { "file_writeback", NR_WRITEBACK },
1369 #ifdef CONFIG_SWAP
1370 { "swapcached", NR_SWAPCACHE },
1371 #endif
1372 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1373 { "anon_thp", NR_ANON_THPS },
1374 { "file_thp", NR_FILE_THPS },
1375 { "shmem_thp", NR_SHMEM_THPS },
1376 #endif
1377 { "inactive_anon", NR_INACTIVE_ANON },
1378 { "active_anon", NR_ACTIVE_ANON },
1379 { "inactive_file", NR_INACTIVE_FILE },
1380 { "active_file", NR_ACTIVE_FILE },
1381 { "unevictable", NR_UNEVICTABLE },
1382 { "slab_reclaimable", NR_SLAB_RECLAIMABLE_B },
1383 { "slab_unreclaimable", NR_SLAB_UNRECLAIMABLE_B },
1384 #ifdef CONFIG_HUGETLB_PAGE
1385 { "hugetlb", NR_HUGETLB },
1386 #endif
1387
1388 /* The memory events */
1389 { "workingset_refault_anon", WORKINGSET_REFAULT_ANON },
1390 { "workingset_refault_file", WORKINGSET_REFAULT_FILE },
1391 { "workingset_activate_anon", WORKINGSET_ACTIVATE_ANON },
1392 { "workingset_activate_file", WORKINGSET_ACTIVATE_FILE },
1393 { "workingset_restore_anon", WORKINGSET_RESTORE_ANON },
1394 { "workingset_restore_file", WORKINGSET_RESTORE_FILE },
1395 { "workingset_nodereclaim", WORKINGSET_NODERECLAIM },
1396
1397 { "pgdemote_kswapd", PGDEMOTE_KSWAPD },
1398 { "pgdemote_direct", PGDEMOTE_DIRECT },
1399 { "pgdemote_khugepaged", PGDEMOTE_KHUGEPAGED },
1400 { "pgdemote_proactive", PGDEMOTE_PROACTIVE },
1401 #ifdef CONFIG_NUMA_BALANCING
1402 { "pgpromote_success", PGPROMOTE_SUCCESS },
1403 #endif
1404 };
1405
1406 /* The actual unit of the state item, not the same as the output unit */
memcg_page_state_unit(int item)1407 static int memcg_page_state_unit(int item)
1408 {
1409 switch (item) {
1410 case MEMCG_PERCPU_B:
1411 case MEMCG_ZSWAP_B:
1412 case NR_SLAB_RECLAIMABLE_B:
1413 case NR_SLAB_UNRECLAIMABLE_B:
1414 return 1;
1415 case NR_KERNEL_STACK_KB:
1416 return SZ_1K;
1417 default:
1418 return PAGE_SIZE;
1419 }
1420 }
1421
1422 /* Translate stat items to the correct unit for memory.stat output */
memcg_page_state_output_unit(int item)1423 static int memcg_page_state_output_unit(int item)
1424 {
1425 /*
1426 * Workingset state is actually in pages, but we export it to userspace
1427 * as a scalar count of events, so special case it here.
1428 *
1429 * Demotion and promotion activities are exported in pages, consistent
1430 * with their global counterparts.
1431 */
1432 switch (item) {
1433 case WORKINGSET_REFAULT_ANON:
1434 case WORKINGSET_REFAULT_FILE:
1435 case WORKINGSET_ACTIVATE_ANON:
1436 case WORKINGSET_ACTIVATE_FILE:
1437 case WORKINGSET_RESTORE_ANON:
1438 case WORKINGSET_RESTORE_FILE:
1439 case WORKINGSET_NODERECLAIM:
1440 case PGDEMOTE_KSWAPD:
1441 case PGDEMOTE_DIRECT:
1442 case PGDEMOTE_KHUGEPAGED:
1443 case PGDEMOTE_PROACTIVE:
1444 #ifdef CONFIG_NUMA_BALANCING
1445 case PGPROMOTE_SUCCESS:
1446 #endif
1447 return 1;
1448 default:
1449 return memcg_page_state_unit(item);
1450 }
1451 }
1452
memcg_page_state_output(struct mem_cgroup * memcg,int item)1453 unsigned long memcg_page_state_output(struct mem_cgroup *memcg, int item)
1454 {
1455 return memcg_page_state(memcg, item) *
1456 memcg_page_state_output_unit(item);
1457 }
1458
1459 #ifdef CONFIG_MEMCG_V1
memcg_page_state_local_output(struct mem_cgroup * memcg,int item)1460 unsigned long memcg_page_state_local_output(struct mem_cgroup *memcg, int item)
1461 {
1462 return memcg_page_state_local(memcg, item) *
1463 memcg_page_state_output_unit(item);
1464 }
1465 #endif
1466
1467 #ifdef CONFIG_HUGETLB_PAGE
memcg_accounts_hugetlb(void)1468 static bool memcg_accounts_hugetlb(void)
1469 {
1470 return cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_HUGETLB_ACCOUNTING;
1471 }
1472 #else /* CONFIG_HUGETLB_PAGE */
memcg_accounts_hugetlb(void)1473 static bool memcg_accounts_hugetlb(void)
1474 {
1475 return false;
1476 }
1477 #endif /* CONFIG_HUGETLB_PAGE */
1478
memcg_stat_format(struct mem_cgroup * memcg,struct seq_buf * s)1479 static void memcg_stat_format(struct mem_cgroup *memcg, struct seq_buf *s)
1480 {
1481 int i;
1482
1483 /*
1484 * Provide statistics on the state of the memory subsystem as
1485 * well as cumulative event counters that show past behavior.
1486 *
1487 * This list is ordered following a combination of these gradients:
1488 * 1) generic big picture -> specifics and details
1489 * 2) reflecting userspace activity -> reflecting kernel heuristics
1490 *
1491 * Current memory state:
1492 */
1493 mem_cgroup_flush_stats(memcg);
1494
1495 for (i = 0; i < ARRAY_SIZE(memory_stats); i++) {
1496 u64 size;
1497
1498 #ifdef CONFIG_HUGETLB_PAGE
1499 if (unlikely(memory_stats[i].idx == NR_HUGETLB) &&
1500 !memcg_accounts_hugetlb())
1501 continue;
1502 #endif
1503 size = memcg_page_state_output(memcg, memory_stats[i].idx);
1504 seq_buf_printf(s, "%s %llu\n", memory_stats[i].name, size);
1505
1506 if (unlikely(memory_stats[i].idx == NR_SLAB_UNRECLAIMABLE_B)) {
1507 size += memcg_page_state_output(memcg,
1508 NR_SLAB_RECLAIMABLE_B);
1509 seq_buf_printf(s, "slab %llu\n", size);
1510 }
1511 }
1512
1513 /* Accumulated memory events */
1514 seq_buf_printf(s, "pgscan %lu\n",
1515 memcg_events(memcg, PGSCAN_KSWAPD) +
1516 memcg_events(memcg, PGSCAN_DIRECT) +
1517 memcg_events(memcg, PGSCAN_PROACTIVE) +
1518 memcg_events(memcg, PGSCAN_KHUGEPAGED));
1519 seq_buf_printf(s, "pgsteal %lu\n",
1520 memcg_events(memcg, PGSTEAL_KSWAPD) +
1521 memcg_events(memcg, PGSTEAL_DIRECT) +
1522 memcg_events(memcg, PGSTEAL_PROACTIVE) +
1523 memcg_events(memcg, PGSTEAL_KHUGEPAGED));
1524
1525 for (i = 0; i < ARRAY_SIZE(memcg_vm_event_stat); i++) {
1526 #ifdef CONFIG_MEMCG_V1
1527 if (memcg_vm_event_stat[i] == PGPGIN ||
1528 memcg_vm_event_stat[i] == PGPGOUT)
1529 continue;
1530 #endif
1531 seq_buf_printf(s, "%s %lu\n",
1532 vm_event_name(memcg_vm_event_stat[i]),
1533 memcg_events(memcg, memcg_vm_event_stat[i]));
1534 }
1535 }
1536
memory_stat_format(struct mem_cgroup * memcg,struct seq_buf * s)1537 static void memory_stat_format(struct mem_cgroup *memcg, struct seq_buf *s)
1538 {
1539 if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
1540 memcg_stat_format(memcg, s);
1541 else
1542 memcg1_stat_format(memcg, s);
1543 if (seq_buf_has_overflowed(s))
1544 pr_warn("%s: Warning, stat buffer overflow, please report\n", __func__);
1545 }
1546
1547 /**
1548 * mem_cgroup_print_oom_context: Print OOM information relevant to
1549 * memory controller.
1550 * @memcg: The memory cgroup that went over limit
1551 * @p: Task that is going to be killed
1552 *
1553 * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
1554 * enabled
1555 */
mem_cgroup_print_oom_context(struct mem_cgroup * memcg,struct task_struct * p)1556 void mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
1557 {
1558 rcu_read_lock();
1559
1560 if (memcg) {
1561 pr_cont(",oom_memcg=");
1562 pr_cont_cgroup_path(memcg->css.cgroup);
1563 } else
1564 pr_cont(",global_oom");
1565 if (p) {
1566 pr_cont(",task_memcg=");
1567 pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
1568 }
1569 rcu_read_unlock();
1570 }
1571
1572 /**
1573 * mem_cgroup_print_oom_meminfo: Print OOM memory information relevant to
1574 * memory controller.
1575 * @memcg: The memory cgroup that went over limit
1576 */
mem_cgroup_print_oom_meminfo(struct mem_cgroup * memcg)1577 void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
1578 {
1579 /* Use static buffer, for the caller is holding oom_lock. */
1580 static char buf[SEQ_BUF_SIZE];
1581 struct seq_buf s;
1582 unsigned long memory_failcnt;
1583
1584 lockdep_assert_held(&oom_lock);
1585
1586 if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
1587 memory_failcnt = atomic_long_read(&memcg->memory_events[MEMCG_MAX]);
1588 else
1589 memory_failcnt = memcg->memory.failcnt;
1590
1591 pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n",
1592 K((u64)page_counter_read(&memcg->memory)),
1593 K((u64)READ_ONCE(memcg->memory.max)), memory_failcnt);
1594 if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
1595 pr_info("swap: usage %llukB, limit %llukB, failcnt %lu\n",
1596 K((u64)page_counter_read(&memcg->swap)),
1597 K((u64)READ_ONCE(memcg->swap.max)),
1598 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_MAX]));
1599 #ifdef CONFIG_MEMCG_V1
1600 else {
1601 pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %lu\n",
1602 K((u64)page_counter_read(&memcg->memsw)),
1603 K((u64)memcg->memsw.max), memcg->memsw.failcnt);
1604 pr_info("kmem: usage %llukB, limit %llukB, failcnt %lu\n",
1605 K((u64)page_counter_read(&memcg->kmem)),
1606 K((u64)memcg->kmem.max), memcg->kmem.failcnt);
1607 }
1608 #endif
1609
1610 pr_info("Memory cgroup stats for ");
1611 pr_cont_cgroup_path(memcg->css.cgroup);
1612 pr_cont(":");
1613 seq_buf_init(&s, buf, SEQ_BUF_SIZE);
1614 memory_stat_format(memcg, &s);
1615 seq_buf_do_printk(&s, KERN_INFO);
1616 }
1617
1618 /*
1619 * Return the memory (and swap, if configured) limit for a memcg.
1620 */
mem_cgroup_get_max(struct mem_cgroup * memcg)1621 unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
1622 {
1623 unsigned long max = READ_ONCE(memcg->memory.max);
1624
1625 if (do_memsw_account()) {
1626 if (mem_cgroup_swappiness(memcg)) {
1627 /* Calculate swap excess capacity from memsw limit */
1628 unsigned long swap = READ_ONCE(memcg->memsw.max) - max;
1629
1630 max += min(swap, (unsigned long)total_swap_pages);
1631 }
1632 } else {
1633 if (mem_cgroup_swappiness(memcg))
1634 max += min(READ_ONCE(memcg->swap.max),
1635 (unsigned long)total_swap_pages);
1636 }
1637 return max;
1638 }
1639
mem_cgroup_size(struct mem_cgroup * memcg)1640 unsigned long mem_cgroup_size(struct mem_cgroup *memcg)
1641 {
1642 return page_counter_read(&memcg->memory);
1643 }
1644
mem_cgroup_out_of_memory(struct mem_cgroup * memcg,gfp_t gfp_mask,int order)1645 static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
1646 int order)
1647 {
1648 struct oom_control oc = {
1649 .zonelist = NULL,
1650 .nodemask = NULL,
1651 .memcg = memcg,
1652 .gfp_mask = gfp_mask,
1653 .order = order,
1654 };
1655 bool ret = true;
1656
1657 if (mutex_lock_killable(&oom_lock))
1658 return true;
1659
1660 if (mem_cgroup_margin(memcg) >= (1 << order))
1661 goto unlock;
1662
1663 /*
1664 * A few threads which were not waiting at mutex_lock_killable() can
1665 * fail to bail out. Therefore, check again after holding oom_lock.
1666 */
1667 ret = task_is_dying() || out_of_memory(&oc);
1668
1669 unlock:
1670 mutex_unlock(&oom_lock);
1671 return ret;
1672 }
1673
1674 /*
1675 * Returns true if successfully killed one or more processes. Though in some
1676 * corner cases it can return true even without killing any process.
1677 */
mem_cgroup_oom(struct mem_cgroup * memcg,gfp_t mask,int order)1678 static bool mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
1679 {
1680 bool locked, ret;
1681
1682 if (order > PAGE_ALLOC_COSTLY_ORDER)
1683 return false;
1684
1685 memcg_memory_event(memcg, MEMCG_OOM);
1686
1687 if (!memcg1_oom_prepare(memcg, &locked))
1688 return false;
1689
1690 ret = mem_cgroup_out_of_memory(memcg, mask, order);
1691
1692 memcg1_oom_finish(memcg, locked);
1693
1694 return ret;
1695 }
1696
1697 /**
1698 * mem_cgroup_get_oom_group - get a memory cgroup to clean up after OOM
1699 * @victim: task to be killed by the OOM killer
1700 * @oom_domain: memcg in case of memcg OOM, NULL in case of system-wide OOM
1701 *
1702 * Returns a pointer to a memory cgroup, which has to be cleaned up
1703 * by killing all belonging OOM-killable tasks.
1704 *
1705 * Caller has to call mem_cgroup_put() on the returned non-NULL memcg.
1706 */
mem_cgroup_get_oom_group(struct task_struct * victim,struct mem_cgroup * oom_domain)1707 struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim,
1708 struct mem_cgroup *oom_domain)
1709 {
1710 struct mem_cgroup *oom_group = NULL;
1711 struct mem_cgroup *memcg;
1712
1713 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
1714 return NULL;
1715
1716 if (!oom_domain)
1717 oom_domain = root_mem_cgroup;
1718
1719 rcu_read_lock();
1720
1721 memcg = mem_cgroup_from_task(victim);
1722 if (mem_cgroup_is_root(memcg))
1723 goto out;
1724
1725 /*
1726 * If the victim task has been asynchronously moved to a different
1727 * memory cgroup, we might end up killing tasks outside oom_domain.
1728 * In this case it's better to ignore memory.group.oom.
1729 */
1730 if (unlikely(!mem_cgroup_is_descendant(memcg, oom_domain)))
1731 goto out;
1732
1733 /*
1734 * Traverse the memory cgroup hierarchy from the victim task's
1735 * cgroup up to the OOMing cgroup (or root) to find the
1736 * highest-level memory cgroup with oom.group set.
1737 */
1738 for (; memcg; memcg = parent_mem_cgroup(memcg)) {
1739 if (READ_ONCE(memcg->oom_group))
1740 oom_group = memcg;
1741
1742 if (memcg == oom_domain)
1743 break;
1744 }
1745
1746 if (oom_group)
1747 css_get(&oom_group->css);
1748 out:
1749 rcu_read_unlock();
1750
1751 return oom_group;
1752 }
1753
mem_cgroup_print_oom_group(struct mem_cgroup * memcg)1754 void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
1755 {
1756 pr_info("Tasks in ");
1757 pr_cont_cgroup_path(memcg->css.cgroup);
1758 pr_cont(" are going to be killed due to memory.oom.group set\n");
1759 }
1760
1761 struct memcg_stock_pcp {
1762 localtry_lock_t stock_lock;
1763 struct mem_cgroup *cached; /* this never be root cgroup */
1764 unsigned int nr_pages;
1765
1766 struct obj_cgroup *cached_objcg;
1767 struct pglist_data *cached_pgdat;
1768 unsigned int nr_bytes;
1769 int nr_slab_reclaimable_b;
1770 int nr_slab_unreclaimable_b;
1771
1772 struct work_struct work;
1773 unsigned long flags;
1774 #define FLUSHING_CACHED_CHARGE 0
1775 };
1776 static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock) = {
1777 .stock_lock = INIT_LOCALTRY_LOCK(stock_lock),
1778 };
1779 static DEFINE_MUTEX(percpu_charge_mutex);
1780
1781 static struct obj_cgroup *drain_obj_stock(struct memcg_stock_pcp *stock);
1782 static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
1783 struct mem_cgroup *root_memcg);
1784
1785 /**
1786 * consume_stock: Try to consume stocked charge on this cpu.
1787 * @memcg: memcg to consume from.
1788 * @nr_pages: how many pages to charge.
1789 * @gfp_mask: allocation mask.
1790 *
1791 * The charges will only happen if @memcg matches the current cpu's memcg
1792 * stock, and at least @nr_pages are available in that stock. Failure to
1793 * service an allocation will refill the stock.
1794 *
1795 * returns true if successful, false otherwise.
1796 */
consume_stock(struct mem_cgroup * memcg,unsigned int nr_pages,gfp_t gfp_mask)1797 static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages,
1798 gfp_t gfp_mask)
1799 {
1800 struct memcg_stock_pcp *stock;
1801 unsigned int stock_pages;
1802 unsigned long flags;
1803 bool ret = false;
1804
1805 if (nr_pages > MEMCG_CHARGE_BATCH)
1806 return ret;
1807
1808 if (!localtry_trylock_irqsave(&memcg_stock.stock_lock, flags)) {
1809 if (!gfpflags_allow_spinning(gfp_mask))
1810 return ret;
1811 localtry_lock_irqsave(&memcg_stock.stock_lock, flags);
1812 }
1813
1814 stock = this_cpu_ptr(&memcg_stock);
1815 stock_pages = READ_ONCE(stock->nr_pages);
1816 if (memcg == READ_ONCE(stock->cached) && stock_pages >= nr_pages) {
1817 WRITE_ONCE(stock->nr_pages, stock_pages - nr_pages);
1818 ret = true;
1819 }
1820
1821 localtry_unlock_irqrestore(&memcg_stock.stock_lock, flags);
1822
1823 return ret;
1824 }
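/*
 * Worked example of the stock in action, assuming MEMCG_CHARGE_BATCH
 * is 64 pages: the first single-page charge against an empty stock
 * fails here, so try_charge_memcg() charges a full 64-page batch to
 * the page counters and refill_stock() caches the 63 unused pages.
 * Roughly the next 63 single-page charges from the same memcg on this
 * CPU are then served from the stock without touching the counters.
 */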
1825
1826 /*
1827 * Return the charges stocked on this CPU to the page counters and reset the cached information.
1828 */
1829 static void drain_stock(struct memcg_stock_pcp *stock)
1830 {
1831 unsigned int stock_pages = READ_ONCE(stock->nr_pages);
1832 struct mem_cgroup *old = READ_ONCE(stock->cached);
1833
1834 if (!old)
1835 return;
1836
1837 if (stock_pages) {
1838 page_counter_uncharge(&old->memory, stock_pages);
1839 if (do_memsw_account())
1840 page_counter_uncharge(&old->memsw, stock_pages);
1841
1842 WRITE_ONCE(stock->nr_pages, 0);
1843 }
1844
1845 css_put(&old->css);
1846 WRITE_ONCE(stock->cached, NULL);
1847 }
1848
1849 static void drain_local_stock(struct work_struct *dummy)
1850 {
1851 struct memcg_stock_pcp *stock;
1852 struct obj_cgroup *old = NULL;
1853 unsigned long flags;
1854
1855 /*
1856 * The only protection from cpu hotplug (memcg_hotplug_cpu_dead) vs.
1857 * drain_stock races is that we always operate on local CPU stock
1858 * here with IRQ disabled
1859 */
1860 localtry_lock_irqsave(&memcg_stock.stock_lock, flags);
1861
1862 stock = this_cpu_ptr(&memcg_stock);
1863 old = drain_obj_stock(stock);
1864 drain_stock(stock);
1865 clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
1866
1867 localtry_unlock_irqrestore(&memcg_stock.stock_lock, flags);
1868 obj_cgroup_put(old);
1869 }
1870
1871 /*
1872 * Cache charges to the local per-cpu area.
1873 * They will be consumed by consume_stock() later.
1874 */
1875 static void __refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
1876 {
1877 struct memcg_stock_pcp *stock;
1878 unsigned int stock_pages;
1879
1880 stock = this_cpu_ptr(&memcg_stock);
1881 if (READ_ONCE(stock->cached) != memcg) { /* reset if necessary */
1882 drain_stock(stock);
1883 css_get(&memcg->css);
1884 WRITE_ONCE(stock->cached, memcg);
1885 }
1886 stock_pages = READ_ONCE(stock->nr_pages) + nr_pages;
1887 WRITE_ONCE(stock->nr_pages, stock_pages);
1888
1889 if (stock_pages > MEMCG_CHARGE_BATCH)
1890 drain_stock(stock);
1891 }
1892
1893 static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
1894 {
1895 unsigned long flags;
1896
1897 if (!localtry_trylock_irqsave(&memcg_stock.stock_lock, flags)) {
1898 /*
1899 * In the unlikely case that the percpu stock_lock can't be taken,
1900 * uncharge the memcg directly.
1901 */
1902 if (mem_cgroup_is_root(memcg))
1903 return;
1904 page_counter_uncharge(&memcg->memory, nr_pages);
1905 if (do_memsw_account())
1906 page_counter_uncharge(&memcg->memsw, nr_pages);
1907 return;
1908 }
1909 __refill_stock(memcg, nr_pages);
1910 localtry_unlock_irqrestore(&memcg_stock.stock_lock, flags);
1911 }
1912
1913 /*
1914 * Drains all per-CPU charge caches for given root_memcg resp. subtree
1915 * of the hierarchy under it.
1916 */
1917 void drain_all_stock(struct mem_cgroup *root_memcg)
1918 {
1919 int cpu, curcpu;
1920
1921 /* If someone's already draining, avoid starting more workers. */
1922 if (!mutex_trylock(&percpu_charge_mutex))
1923 return;
1924 /*
1925 * Notify other cpus that a system-wide "drain" is running.
1926 * We do not care about races with the cpu hotplug because cpu down
1927 * as well as workers from this path always operate on the local
1928 * per-cpu data. CPU up doesn't touch memcg_stock at all.
1929 */
1930 migrate_disable();
1931 curcpu = smp_processor_id();
1932 for_each_online_cpu(cpu) {
1933 struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
1934 struct mem_cgroup *memcg;
1935 bool flush = false;
1936
1937 rcu_read_lock();
1938 memcg = READ_ONCE(stock->cached);
1939 if (memcg && READ_ONCE(stock->nr_pages) &&
1940 mem_cgroup_is_descendant(memcg, root_memcg))
1941 flush = true;
1942 else if (obj_stock_flush_required(stock, root_memcg))
1943 flush = true;
1944 rcu_read_unlock();
1945
1946 if (flush &&
1947 !test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
1948 if (cpu == curcpu)
1949 drain_local_stock(&stock->work);
1950 else if (!cpu_is_isolated(cpu))
1951 schedule_work_on(cpu, &stock->work);
1952 }
1953 }
1954 migrate_enable();
1955 mutex_unlock(&percpu_charge_mutex);
1956 }
1957
1958 static int memcg_hotplug_cpu_dead(unsigned int cpu)
1959 {
1960 struct memcg_stock_pcp *stock;
1961 struct obj_cgroup *old;
1962 unsigned long flags;
1963
1964 stock = &per_cpu(memcg_stock, cpu);
1965
1966 /* drain_obj_stock requires stock_lock */
1967 localtry_lock_irqsave(&memcg_stock.stock_lock, flags);
1968 old = drain_obj_stock(stock);
1969 localtry_unlock_irqrestore(&memcg_stock.stock_lock, flags);
1970
1971 drain_stock(stock);
1972 obj_cgroup_put(old);
1973
1974 return 0;
1975 }
1976
1977 static unsigned long reclaim_high(struct mem_cgroup *memcg,
1978 unsigned int nr_pages,
1979 gfp_t gfp_mask)
1980 {
1981 unsigned long nr_reclaimed = 0;
1982
1983 do {
1984 unsigned long pflags;
1985
1986 if (page_counter_read(&memcg->memory) <=
1987 READ_ONCE(memcg->memory.high))
1988 continue;
1989
1990 memcg_memory_event(memcg, MEMCG_HIGH);
1991
1992 psi_memstall_enter(&pflags);
1993 nr_reclaimed += try_to_free_mem_cgroup_pages(memcg, nr_pages,
1994 gfp_mask,
1995 MEMCG_RECLAIM_MAY_SWAP,
1996 NULL);
1997 psi_memstall_leave(&pflags);
1998 } while ((memcg = parent_mem_cgroup(memcg)) &&
1999 !mem_cgroup_is_root(memcg));
2000
2001 return nr_reclaimed;
2002 }
2003
2004 static void high_work_func(struct work_struct *work)
2005 {
2006 struct mem_cgroup *memcg;
2007
2008 memcg = container_of(work, struct mem_cgroup, high_work);
2009 reclaim_high(memcg, MEMCG_CHARGE_BATCH, GFP_KERNEL);
2010 }
2011
2012 /*
2013 * Clamp the maximum sleep time per allocation batch to 2 seconds. This is
2014 * enough to still cause a significant slowdown in most cases, while still
2015 * allowing diagnostics and tracing to proceed without becoming stuck.
2016 */
2017 #define MEMCG_MAX_HIGH_DELAY_JIFFIES (2UL*HZ)
2018
2019 /*
2020 * When calculating the delay, we use these either side of the exponentiation to
2021 * maintain precision and scale to a reasonable number of jiffies (see the table
2022 * below).
2023 *
2024 * - MEMCG_DELAY_PRECISION_SHIFT: Extra precision bits while translating the
2025 * overage ratio to a delay.
2026 * - MEMCG_DELAY_SCALING_SHIFT: The number of bits to scale down the
2027 * proposed penalty in order to reduce to a reasonable number of jiffies, and
2028 * to produce a reasonable delay curve.
2029 *
2030 * MEMCG_DELAY_SCALING_SHIFT just happens to be a number that produces a
2031 * reasonable delay curve compared to precision-adjusted overage, not
2032 * penalising heavily at first, but still making sure that growth beyond the
2033 * limit penalises misbehaving cgroups by slowing them down exponentially. For
2034 * example, with a high of 100 megabytes:
2035 *
2036 * +-------+------------------------+
2037 * | usage | time to allocate in ms |
2038 * +-------+------------------------+
2039 * | 100M | 0 |
2040 * | 101M | 6 |
2041 * | 102M | 25 |
2042 * | 103M | 57 |
2043 * | 104M | 102 |
2044 * | 105M | 159 |
2045 * | 106M | 230 |
2046 * | 107M | 313 |
2047 * | 108M | 409 |
2048 * | 109M | 518 |
2049 * | 110M | 639 |
2050 * | 111M | 774 |
2051 * | 112M | 921 |
2052 * | 113M | 1081 |
2053 * | 114M | 1254 |
2054 * | 115M | 1439 |
2055 * | 116M | 1638 |
2056 * | 117M | 1849 |
2057 * | 118M | 2000 |
2058 * | 119M | 2000 |
2059 * | 120M | 2000 |
2060 * +-------+------------------------+
2061 */
2062 #define MEMCG_DELAY_PRECISION_SHIFT 20
2063 #define MEMCG_DELAY_SCALING_SHIFT 14
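/*
 * Worked example for one row of the table above, assuming HZ == 1000
 * and a full MEMCG_CHARGE_BATCH worth of pages being charged:
 *
 *	usage = 107M, high = 100M
 *	overage = ((107M - 100M) << MEMCG_DELAY_PRECISION_SHIFT) / 100M
 *	        ~= 73400
 *	penalty = overage * overage * HZ
 *	          >> (MEMCG_DELAY_PRECISION_SHIFT + MEMCG_DELAY_SCALING_SHIFT)
 *	        ~= 73400 * 73400 * 1000 >> 34 ~= 313 jiffies ~= 313ms
 */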
2064
2065 static u64 calculate_overage(unsigned long usage, unsigned long high)
2066 {
2067 u64 overage;
2068
2069 if (usage <= high)
2070 return 0;
2071
2072 /*
2073 * Prevent division by 0 in overage calculation by acting as if
2074 * it was a threshold of 1 page
2075 */
2076 high = max(high, 1UL);
2077
2078 overage = usage - high;
2079 overage <<= MEMCG_DELAY_PRECISION_SHIFT;
2080 return div64_u64(overage, high);
2081 }
2082
2083 static u64 mem_find_max_overage(struct mem_cgroup *memcg)
2084 {
2085 u64 overage, max_overage = 0;
2086
2087 do {
2088 overage = calculate_overage(page_counter_read(&memcg->memory),
2089 READ_ONCE(memcg->memory.high));
2090 max_overage = max(overage, max_overage);
2091 } while ((memcg = parent_mem_cgroup(memcg)) &&
2092 !mem_cgroup_is_root(memcg));
2093
2094 return max_overage;
2095 }
2096
2097 static u64 swap_find_max_overage(struct mem_cgroup *memcg)
2098 {
2099 u64 overage, max_overage = 0;
2100
2101 do {
2102 overage = calculate_overage(page_counter_read(&memcg->swap),
2103 READ_ONCE(memcg->swap.high));
2104 if (overage)
2105 memcg_memory_event(memcg, MEMCG_SWAP_HIGH);
2106 max_overage = max(overage, max_overage);
2107 } while ((memcg = parent_mem_cgroup(memcg)) &&
2108 !mem_cgroup_is_root(memcg));
2109
2110 return max_overage;
2111 }
2112
2113 /*
2114 * Get the number of jiffies that we should penalise a mischievous cgroup which
2115 * is exceeding its memory.high by checking both it and its ancestors.
2116 */
2117 static unsigned long calculate_high_delay(struct mem_cgroup *memcg,
2118 unsigned int nr_pages,
2119 u64 max_overage)
2120 {
2121 unsigned long penalty_jiffies;
2122
2123 if (!max_overage)
2124 return 0;
2125
2126 /*
2127 * We use overage compared to memory.high to calculate the number of
2128 * jiffies to sleep (penalty_jiffies). Ideally this value should be
2129 * fairly lenient on small overages, and increasingly harsh when the
2130 * memcg in question makes it clear that it has no intention of stopping
2131 * its crazy behaviour, so we exponentially increase the delay based on
2132 * overage amount.
2133 */
2134 penalty_jiffies = max_overage * max_overage * HZ;
2135 penalty_jiffies >>= MEMCG_DELAY_PRECISION_SHIFT;
2136 penalty_jiffies >>= MEMCG_DELAY_SCALING_SHIFT;
2137
2138 /*
2139 * Factor in the task's own contribution to the overage, such that four
2140 * N-sized allocations are throttled approximately the same as one
2141 * 4N-sized allocation.
2142 *
2143 * MEMCG_CHARGE_BATCH pages is nominal, so work out how much smaller or
2144 * larger the current charge batch is than that.
2145 */
2146 return penalty_jiffies * nr_pages / MEMCG_CHARGE_BATCH;
2147 }
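/*
 * E.g., assuming MEMCG_CHARGE_BATCH is 64 pages: a task that charged
 * only 16 pages pays a quarter of the penalty computed above, while a
 * task that charged 256 pages pays four times as much.
 */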
2148
2149 /*
2150 * Reclaims memory over the high limit. Called directly from
2151 * try_charge() (context permitting), as well as from the userland
2152 * return path where reclaim is always able to block.
2153 */
2154 void mem_cgroup_handle_over_high(gfp_t gfp_mask)
2155 {
2156 unsigned long penalty_jiffies;
2157 unsigned long pflags;
2158 unsigned long nr_reclaimed;
2159 unsigned int nr_pages = current->memcg_nr_pages_over_high;
2160 int nr_retries = MAX_RECLAIM_RETRIES;
2161 struct mem_cgroup *memcg;
2162 bool in_retry = false;
2163
2164 if (likely(!nr_pages))
2165 return;
2166
2167 memcg = get_mem_cgroup_from_mm(current->mm);
2168 current->memcg_nr_pages_over_high = 0;
2169
2170 retry_reclaim:
2171 /*
2172 * Bail if the task is already exiting. Unlike memory.max,
2173 * memory.high enforcement isn't as strict, and there is no
2174 * OOM killer involved, which means the excess could already
2175 * be much bigger (and still growing) than it could for
2176 * memory.max; the dying task could get stuck in fruitless
2177 * reclaim for a long time, which isn't desirable.
2178 */
2179 if (task_is_dying())
2180 goto out;
2181
2182 /*
2183 * The allocating task should reclaim at least the batch size, but for
2184 * subsequent retries we only want to do what's necessary to prevent oom
2185 * or breaching resource isolation.
2186 *
2187 * This is distinct from memory.max or page allocator behaviour because
2188 * memory.high is currently batched, whereas memory.max and the page
2189 * allocator run every time an allocation is made.
2190 */
2191 nr_reclaimed = reclaim_high(memcg,
2192 in_retry ? SWAP_CLUSTER_MAX : nr_pages,
2193 gfp_mask);
2194
2195 /*
2196 * memory.high is breached and reclaim is unable to keep up. Throttle
2197 * allocators proactively to slow down excessive growth.
2198 */
2199 penalty_jiffies = calculate_high_delay(memcg, nr_pages,
2200 mem_find_max_overage(memcg));
2201
2202 penalty_jiffies += calculate_high_delay(memcg, nr_pages,
2203 swap_find_max_overage(memcg));
2204
2205 /*
2206 * Clamp the max delay per usermode return so as to still keep the
2207 * application moving forwards and also permit diagnostics, albeit
2208 * extremely slowly.
2209 */
2210 penalty_jiffies = min(penalty_jiffies, MEMCG_MAX_HIGH_DELAY_JIFFIES);
2211
2212 /*
2213 * Don't sleep if the amount of jiffies this memcg owes us is so low
2214 * that it's not even worth doing, in an attempt to be nice to those who
2215 * go only a small amount over their memory.high value and maybe haven't
2216 * been aggressively reclaimed enough yet.
2217 */
2218 if (penalty_jiffies <= HZ / 100)
2219 goto out;
2220
2221 /*
2222 * If reclaim is making forward progress but we're still over
2223 * memory.high, we want to encourage that rather than doing allocator
2224 * throttling.
2225 */
2226 if (nr_reclaimed || nr_retries--) {
2227 in_retry = true;
2228 goto retry_reclaim;
2229 }
2230
2231 /*
2232 * Reclaim didn't manage to push usage below the limit, slow
2233 * this allocating task down.
2234 *
2235 * If we exit early, we're guaranteed to die (since
2236 * schedule_timeout_killable sets TASK_KILLABLE). This means we don't
2237 * need to account for any ill-begotten jiffies to pay them off later.
2238 */
2239 psi_memstall_enter(&pflags);
2240 schedule_timeout_killable(penalty_jiffies);
2241 psi_memstall_leave(&pflags);
2242
2243 out:
2244 css_put(&memcg->css);
2245 }
2246
2247 static int try_charge_memcg(struct mem_cgroup *memcg, gfp_t gfp_mask,
2248 unsigned int nr_pages)
2249 {
2250 unsigned int batch = max(MEMCG_CHARGE_BATCH, nr_pages);
2251 int nr_retries = MAX_RECLAIM_RETRIES;
2252 struct mem_cgroup *mem_over_limit;
2253 struct page_counter *counter;
2254 unsigned long nr_reclaimed;
2255 bool passed_oom = false;
2256 unsigned int reclaim_options = MEMCG_RECLAIM_MAY_SWAP;
2257 bool drained = false;
2258 bool raised_max_event = false;
2259 unsigned long pflags;
2260
2261 retry:
2262 if (consume_stock(memcg, nr_pages, gfp_mask))
2263 return 0;
2264
2265 if (!gfpflags_allow_spinning(gfp_mask))
2266 /* Avoid the refill and flush of the older stock */
2267 batch = nr_pages;
2268
2269 if (!do_memsw_account() ||
2270 page_counter_try_charge(&memcg->memsw, batch, &counter)) {
2271 if (page_counter_try_charge(&memcg->memory, batch, &counter))
2272 goto done_restock;
2273 if (do_memsw_account())
2274 page_counter_uncharge(&memcg->memsw, batch);
2275 mem_over_limit = mem_cgroup_from_counter(counter, memory);
2276 } else {
2277 mem_over_limit = mem_cgroup_from_counter(counter, memsw);
2278 reclaim_options &= ~MEMCG_RECLAIM_MAY_SWAP;
2279 }
2280
2281 if (batch > nr_pages) {
2282 batch = nr_pages;
2283 goto retry;
2284 }
2285
2286 /*
2287 * Prevent unbounded recursion when reclaim operations need to
2288 * allocate memory. This might exceed the limits temporarily,
2289 * but we prefer facilitating memory reclaim and getting back
2290 * under the limit over triggering OOM kills in these cases.
2291 */
2292 if (unlikely(current->flags & PF_MEMALLOC))
2293 goto force;
2294
2295 if (unlikely(task_in_memcg_oom(current)))
2296 goto nomem;
2297
2298 if (!gfpflags_allow_blocking(gfp_mask))
2299 goto nomem;
2300
2301 memcg_memory_event(mem_over_limit, MEMCG_MAX);
2302 raised_max_event = true;
2303
2304 psi_memstall_enter(&pflags);
2305 nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages,
2306 gfp_mask, reclaim_options, NULL);
2307 psi_memstall_leave(&pflags);
2308
2309 if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
2310 goto retry;
2311
2312 if (!drained) {
2313 drain_all_stock(mem_over_limit);
2314 drained = true;
2315 goto retry;
2316 }
2317
2318 if (gfp_mask & __GFP_NORETRY)
2319 goto nomem;
2320 /*
2321 * Even though the limit is exceeded at this point, reclaim
2322 * may have been able to free some pages. Retry the charge
2323 * before killing the task.
2324 *
2325 * Only for regular pages, though: huge pages are rather
2326 * unlikely to succeed so close to the limit, and we fall back
2327 * to regular pages anyway in case of failure.
2328 */
2329 if (nr_reclaimed && nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER))
2330 goto retry;
2331
2332 if (nr_retries--)
2333 goto retry;
2334
2335 if (gfp_mask & __GFP_RETRY_MAYFAIL)
2336 goto nomem;
2337
2338 /* Avoid endless loop for tasks bypassed by the oom killer */
2339 if (passed_oom && task_is_dying())
2340 goto nomem;
2341
2342 /*
2343 * Keep retrying as long as the memcg OOM killer is able to make
2344 * forward progress, or bypass the charge if the OOM killer
2345 * couldn't make any progress.
2346 */
2347 if (mem_cgroup_oom(mem_over_limit, gfp_mask,
2348 get_order(nr_pages * PAGE_SIZE))) {
2349 passed_oom = true;
2350 nr_retries = MAX_RECLAIM_RETRIES;
2351 goto retry;
2352 }
2353 nomem:
2354 /*
2355 * Memcg doesn't have a dedicated reserve for atomic
2356 * allocations. But like the global atomic pool, we need to
2357 * put the burden of reclaim on regular allocation requests
2358 * and let these go through as privileged allocations.
2359 */
2360 if (!(gfp_mask & (__GFP_NOFAIL | __GFP_HIGH)))
2361 return -ENOMEM;
2362 force:
2363 /*
2364 * If the allocation has to be enforced, don't forget to raise
2365 * a MEMCG_MAX event.
2366 */
2367 if (!raised_max_event)
2368 memcg_memory_event(mem_over_limit, MEMCG_MAX);
2369
2370 /*
2371 * The allocation either can't fail or will lead to more memory
2372 * being freed very soon. Allow memory usage to go over the limit
2373 * temporarily by force charging it.
2374 */
2375 page_counter_charge(&memcg->memory, nr_pages);
2376 if (do_memsw_account())
2377 page_counter_charge(&memcg->memsw, nr_pages);
2378
2379 return 0;
2380
2381 done_restock:
2382 if (batch > nr_pages)
2383 refill_stock(memcg, batch - nr_pages);
2384
2385 /*
2386 * If the hierarchy is above the normal consumption range, schedule
2387 * reclaim on returning to userland. We can perform reclaim here
2388 * if __GFP_RECLAIM but let's always punt for simplicity and so that
2389 * GFP_KERNEL can consistently be used during reclaim. @memcg is
2390 * not recorded as it most likely matches current's and won't
2391 * change in the meantime. As high limit is checked again before
2392 * reclaim, the cost of mismatch is negligible.
2393 */
2394 do {
2395 bool mem_high, swap_high;
2396
2397 mem_high = page_counter_read(&memcg->memory) >
2398 READ_ONCE(memcg->memory.high);
2399 swap_high = page_counter_read(&memcg->swap) >
2400 READ_ONCE(memcg->swap.high);
2401
2402 /* Don't bother a random interrupted task */
2403 if (!in_task()) {
2404 if (mem_high) {
2405 schedule_work(&memcg->high_work);
2406 break;
2407 }
2408 continue;
2409 }
2410
2411 if (mem_high || swap_high) {
2412 /*
2413 * The allocating tasks in this cgroup will need to do
2414 * reclaim or be throttled to prevent further growth
2415 * of the memory or swap footprints.
2416 *
2417 * Target some best-effort fairness between the tasks,
2418 * and distribute reclaim work and delay penalties
2419 * based on how much each task is actually allocating.
2420 */
2421 current->memcg_nr_pages_over_high += batch;
2422 set_notify_resume(current);
2423 break;
2424 }
2425 } while ((memcg = parent_mem_cgroup(memcg)));
2426
2427 /*
2428 * Reclaim is set up above to be called from the userland
2429 * return path. But also attempt synchronous reclaim to avoid
2430 * excessive overrun while the task is still inside the
2431 * kernel. If this is successful, the return path will see it
2432 * when it rechecks the overage and simply bails out.
2433 */
2434 if (current->memcg_nr_pages_over_high > MEMCG_CHARGE_BATCH &&
2435 !(current->flags & PF_MEMALLOC) &&
2436 gfpflags_allow_blocking(gfp_mask))
2437 mem_cgroup_handle_over_high(gfp_mask);
2438 return 0;
2439 }
2440
2441 static inline int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
2442 unsigned int nr_pages)
2443 {
2444 if (mem_cgroup_is_root(memcg))
2445 return 0;
2446
2447 return try_charge_memcg(memcg, gfp_mask, nr_pages);
2448 }
2449
2450 static void commit_charge(struct folio *folio, struct mem_cgroup *memcg)
2451 {
2452 VM_BUG_ON_FOLIO(folio_memcg_charged(folio), folio);
2453 /*
2454 * Any of the following ensures the folio's memcg stability:
2455 *
2456 * - the page lock
2457 * - LRU isolation
2458 * - exclusive reference
2459 */
2460 folio->memcg_data = (unsigned long)memcg;
2461 }
2462
2463 static inline void __mod_objcg_mlstate(struct obj_cgroup *objcg,
2464 struct pglist_data *pgdat,
2465 enum node_stat_item idx, int nr)
2466 {
2467 struct mem_cgroup *memcg;
2468 struct lruvec *lruvec;
2469
2470 rcu_read_lock();
2471 memcg = obj_cgroup_memcg(objcg);
2472 lruvec = mem_cgroup_lruvec(memcg, pgdat);
2473 __mod_memcg_lruvec_state(lruvec, idx, nr);
2474 rcu_read_unlock();
2475 }
2476
2477 static __always_inline
2478 struct mem_cgroup *mem_cgroup_from_obj_folio(struct folio *folio, void *p)
2479 {
2480 /*
2481 * Slab objects are accounted individually, not per-page.
2482 * Memcg membership data for each individual object is saved in
2483 * slab->obj_exts.
2484 */
2485 if (folio_test_slab(folio)) {
2486 struct slabobj_ext *obj_exts;
2487 struct slab *slab;
2488 unsigned int off;
2489
2490 slab = folio_slab(folio);
2491 obj_exts = slab_obj_exts(slab);
2492 if (!obj_exts)
2493 return NULL;
2494
2495 off = obj_to_index(slab->slab_cache, slab, p);
2496 if (obj_exts[off].objcg)
2497 return obj_cgroup_memcg(obj_exts[off].objcg);
2498
2499 return NULL;
2500 }
2501
2502 /*
2503 * folio_memcg_check() is used here, because in theory we can encounter
2504 * a folio where the slab flag has been cleared already, but
2505 * slab->obj_exts has not been freed yet.
2506 * folio_memcg_check() will guarantee that a proper memory
2507 * cgroup pointer or NULL will be returned.
2508 */
2509 return folio_memcg_check(folio);
2510 }
2511
2512 /*
2513 * Returns a pointer to the memory cgroup to which the kernel object is charged.
2514 * It is not suitable for objects allocated using vmalloc().
2515 *
2516 * A passed kernel object must be a slab object or a generic kernel page.
2517 *
2518 * The caller must ensure the memcg lifetime, e.g. by taking rcu_read_lock(),
2519 * cgroup_mutex, etc.
2520 */
2521 struct mem_cgroup *mem_cgroup_from_slab_obj(void *p)
2522 {
2523 if (mem_cgroup_disabled())
2524 return NULL;
2525
2526 return mem_cgroup_from_obj_folio(virt_to_folio(p), p);
2527 }
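/*
 * Minimal usage sketch (illustrative, not a call site in this file):
 * the caller pins the memcg for the duration of the access, e.g.:
 *
 *	rcu_read_lock();
 *	memcg = mem_cgroup_from_slab_obj(object);
 *	if (memcg)
 *		... read stats or call mem_cgroup_is_descendant() ...
 *	rcu_read_unlock();
 */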
2528
2529 static struct obj_cgroup *__get_obj_cgroup_from_memcg(struct mem_cgroup *memcg)
2530 {
2531 struct obj_cgroup *objcg = NULL;
2532
2533 for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg)) {
2534 objcg = rcu_dereference(memcg->objcg);
2535 if (likely(objcg && obj_cgroup_tryget(objcg)))
2536 break;
2537 objcg = NULL;
2538 }
2539 return objcg;
2540 }
2541
2542 static struct obj_cgroup *current_objcg_update(void)
2543 {
2544 struct mem_cgroup *memcg;
2545 struct obj_cgroup *old, *objcg = NULL;
2546
2547 do {
2548 /* Atomically drop the update bit. */
2549 old = xchg(&current->objcg, NULL);
2550 if (old) {
2551 old = (struct obj_cgroup *)
2552 ((unsigned long)old & ~CURRENT_OBJCG_UPDATE_FLAG);
2553 obj_cgroup_put(old);
2554
2555 old = NULL;
2556 }
2557
2558 /* If new objcg is NULL, no reason for the second atomic update. */
2559 if (!current->mm || (current->flags & PF_KTHREAD))
2560 return NULL;
2561
2562 /*
2563 * Release the objcg pointer from the previous iteration,
2564 * if try_cmpxchg() below fails.
2565 */
2566 if (unlikely(objcg)) {
2567 obj_cgroup_put(objcg);
2568 objcg = NULL;
2569 }
2570
2571 /*
2572 * Obtain the new objcg pointer. The current task can be
2573 * asynchronously moved to another memcg and the previous
2574 * memcg can be offlined. So let's get the memcg pointer
2575 * and try get a reference to objcg under a rcu read lock.
2576 */
2577
2578 rcu_read_lock();
2579 memcg = mem_cgroup_from_task(current);
2580 objcg = __get_obj_cgroup_from_memcg(memcg);
2581 rcu_read_unlock();
2582
2583 /*
2584 * Try to set up a new objcg pointer atomically. If it
2585 * fails, it means the update flag was set concurrently, so
2586 * the whole procedure should be repeated.
2587 */
2588 } while (!try_cmpxchg(&current->objcg, &old, objcg));
2589
2590 return objcg;
2591 }
2592
2593 __always_inline struct obj_cgroup *current_obj_cgroup(void)
2594 {
2595 struct mem_cgroup *memcg;
2596 struct obj_cgroup *objcg;
2597
2598 if (in_task()) {
2599 memcg = current->active_memcg;
2600 if (unlikely(memcg))
2601 goto from_memcg;
2602
2603 objcg = READ_ONCE(current->objcg);
2604 if (unlikely((unsigned long)objcg & CURRENT_OBJCG_UPDATE_FLAG))
2605 objcg = current_objcg_update();
2606 /*
2607 * The objcg reference is kept by the task, so it's safe
2608 * for the current task to use the objcg.
2609 */
2610 return objcg;
2611 }
2612
2613 memcg = this_cpu_read(int_active_memcg);
2614 if (unlikely(memcg))
2615 goto from_memcg;
2616
2617 return NULL;
2618
2619 from_memcg:
2620 objcg = NULL;
2621 for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg)) {
2622 /*
2623 * Memcg pointer is protected by scope (see set_active_memcg())
2624 * and is pinning the corresponding objcg, so objcg can't go
2625 * away and can be used within the scope without any additional
2626 * protection.
2627 */
2628 objcg = rcu_dereference_check(memcg->objcg, 1);
2629 if (likely(objcg))
2630 break;
2631 }
2632
2633 return objcg;
2634 }
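/*
 * Illustrative sketch of the scope rule described above: a caller that
 * wants allocations attributed to a specific memcg brackets them with
 * set_active_memcg(), and an objcg obtained in between is only valid
 * inside that window unless obj_cgroup_get() is called on it:
 *
 *	old_memcg = set_active_memcg(memcg);
 *	objcg = current_obj_cgroup();	(no reference is taken here)
 *	...
 *	set_active_memcg(old_memcg);
 */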
2635
2636 struct obj_cgroup *get_obj_cgroup_from_folio(struct folio *folio)
2637 {
2638 struct obj_cgroup *objcg;
2639
2640 if (!memcg_kmem_online())
2641 return NULL;
2642
2643 if (folio_memcg_kmem(folio)) {
2644 objcg = __folio_objcg(folio);
2645 obj_cgroup_get(objcg);
2646 } else {
2647 struct mem_cgroup *memcg;
2648
2649 rcu_read_lock();
2650 memcg = __folio_memcg(folio);
2651 if (memcg)
2652 objcg = __get_obj_cgroup_from_memcg(memcg);
2653 else
2654 objcg = NULL;
2655 rcu_read_unlock();
2656 }
2657 return objcg;
2658 }
2659
2660 /*
2661 * obj_cgroup_uncharge_pages: uncharge a number of kernel pages from an objcg
2662 * @objcg: object cgroup to uncharge
2663 * @nr_pages: number of pages to uncharge
2664 */
2665 static void obj_cgroup_uncharge_pages(struct obj_cgroup *objcg,
2666 unsigned int nr_pages)
2667 {
2668 struct mem_cgroup *memcg;
2669
2670 memcg = get_mem_cgroup_from_objcg(objcg);
2671
2672 mod_memcg_state(memcg, MEMCG_KMEM, -nr_pages);
2673 memcg1_account_kmem(memcg, -nr_pages);
2674 if (!mem_cgroup_is_root(memcg))
2675 refill_stock(memcg, nr_pages);
2676
2677 css_put(&memcg->css);
2678 }
2679
2680 /*
2681 * obj_cgroup_charge_pages: charge a number of kernel pages to an objcg
2682 * @objcg: object cgroup to charge
2683 * @gfp: reclaim mode
2684 * @nr_pages: number of pages to charge
2685 *
2686 * Returns 0 on success, an error code on failure.
2687 */
2688 static int obj_cgroup_charge_pages(struct obj_cgroup *objcg, gfp_t gfp,
2689 unsigned int nr_pages)
2690 {
2691 struct mem_cgroup *memcg;
2692 int ret;
2693
2694 memcg = get_mem_cgroup_from_objcg(objcg);
2695
2696 ret = try_charge_memcg(memcg, gfp, nr_pages);
2697 if (ret)
2698 goto out;
2699
2700 mod_memcg_state(memcg, MEMCG_KMEM, nr_pages);
2701 memcg1_account_kmem(memcg, nr_pages);
2702 out:
2703 css_put(&memcg->css);
2704
2705 return ret;
2706 }
2707
2708 static struct obj_cgroup *page_objcg(const struct page *page)
2709 {
2710 unsigned long memcg_data = page->memcg_data;
2711
2712 if (mem_cgroup_disabled() || !memcg_data)
2713 return NULL;
2714
2715 VM_BUG_ON_PAGE((memcg_data & OBJEXTS_FLAGS_MASK) != MEMCG_DATA_KMEM,
2716 page);
2717 return (struct obj_cgroup *)(memcg_data - MEMCG_DATA_KMEM);
2718 }
2719
2720 static void page_set_objcg(struct page *page, const struct obj_cgroup *objcg)
2721 {
2722 page->memcg_data = (unsigned long)objcg | MEMCG_DATA_KMEM;
2723 }
2724
2725 /**
2726 * __memcg_kmem_charge_page: charge a kmem page to the current memory cgroup
2727 * @page: page to charge
2728 * @gfp: reclaim mode
2729 * @order: allocation order
2730 *
2731 * Returns 0 on success, an error code on failure.
2732 */
2733 int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order)
2734 {
2735 struct obj_cgroup *objcg;
2736 int ret = 0;
2737
2738 objcg = current_obj_cgroup();
2739 if (objcg) {
2740 ret = obj_cgroup_charge_pages(objcg, gfp, 1 << order);
2741 if (!ret) {
2742 obj_cgroup_get(objcg);
2743 page_set_objcg(page, objcg);
2744 return 0;
2745 }
2746 }
2747 return ret;
2748 }
2749
2750 /**
2751 * __memcg_kmem_uncharge_page: uncharge a kmem page
2752 * @page: page to uncharge
2753 * @order: allocation order
2754 */
2755 void __memcg_kmem_uncharge_page(struct page *page, int order)
2756 {
2757 struct obj_cgroup *objcg = page_objcg(page);
2758 unsigned int nr_pages = 1 << order;
2759
2760 if (!objcg)
2761 return;
2762
2763 obj_cgroup_uncharge_pages(objcg, nr_pages);
2764 page->memcg_data = 0;
2765 obj_cgroup_put(objcg);
2766 }
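/*
 * These two helpers back __GFP_ACCOUNT page allocations. As a rough
 * sketch (the actual call sites live in the page allocator, not here):
 *
 *	page = alloc_pages(GFP_KERNEL | __GFP_ACCOUNT, order);
 *		-> __memcg_kmem_charge_page(page, gfp, order)
 *	__free_pages(page, order);
 *		-> __memcg_kmem_uncharge_page(page, order)
 */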
2767
2768 /* Replace the stock objcg with objcg, return the old objcg */
2769 static struct obj_cgroup *replace_stock_objcg(struct memcg_stock_pcp *stock,
2770 struct obj_cgroup *objcg)
2771 {
2772 struct obj_cgroup *old = NULL;
2773
2774 old = drain_obj_stock(stock);
2775 obj_cgroup_get(objcg);
2776 stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes)
2777 ? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0;
2778 WRITE_ONCE(stock->cached_objcg, objcg);
2779 return old;
2780 }
2781
2782 static void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat,
2783 enum node_stat_item idx, int nr)
2784 {
2785 struct memcg_stock_pcp *stock;
2786 struct obj_cgroup *old = NULL;
2787 unsigned long flags;
2788 int *bytes;
2789
2790 localtry_lock_irqsave(&memcg_stock.stock_lock, flags);
2791 stock = this_cpu_ptr(&memcg_stock);
2792
2793 /*
2794 * Save vmstat data in stock and skip vmstat array update unless
2795 * accumulating over a page of vmstat data or when pgdat or idx
2796 * changes.
2797 */
2798 if (READ_ONCE(stock->cached_objcg) != objcg) {
2799 old = replace_stock_objcg(stock, objcg);
2800 stock->cached_pgdat = pgdat;
2801 } else if (stock->cached_pgdat != pgdat) {
2802 /* Flush the existing cached vmstat data */
2803 struct pglist_data *oldpg = stock->cached_pgdat;
2804
2805 if (stock->nr_slab_reclaimable_b) {
2806 __mod_objcg_mlstate(objcg, oldpg, NR_SLAB_RECLAIMABLE_B,
2807 stock->nr_slab_reclaimable_b);
2808 stock->nr_slab_reclaimable_b = 0;
2809 }
2810 if (stock->nr_slab_unreclaimable_b) {
2811 __mod_objcg_mlstate(objcg, oldpg, NR_SLAB_UNRECLAIMABLE_B,
2812 stock->nr_slab_unreclaimable_b);
2813 stock->nr_slab_unreclaimable_b = 0;
2814 }
2815 stock->cached_pgdat = pgdat;
2816 }
2817
2818 bytes = (idx == NR_SLAB_RECLAIMABLE_B) ? &stock->nr_slab_reclaimable_b
2819 : &stock->nr_slab_unreclaimable_b;
2820 /*
2821 * Even for large object >= PAGE_SIZE, the vmstat data will still be
2822 * cached locally at least once before pushing it out.
2823 */
2824 if (!*bytes) {
2825 *bytes = nr;
2826 nr = 0;
2827 } else {
2828 *bytes += nr;
2829 if (abs(*bytes) > PAGE_SIZE) {
2830 nr = *bytes;
2831 *bytes = 0;
2832 } else {
2833 nr = 0;
2834 }
2835 }
2836 if (nr)
2837 __mod_objcg_mlstate(objcg, pgdat, idx, nr);
2838
2839 localtry_unlock_irqrestore(&memcg_stock.stock_lock, flags);
2840 obj_cgroup_put(old);
2841 }
2842
2843 static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes)
2844 {
2845 struct memcg_stock_pcp *stock;
2846 unsigned long flags;
2847 bool ret = false;
2848
2849 localtry_lock_irqsave(&memcg_stock.stock_lock, flags);
2850
2851 stock = this_cpu_ptr(&memcg_stock);
2852 if (objcg == READ_ONCE(stock->cached_objcg) && stock->nr_bytes >= nr_bytes) {
2853 stock->nr_bytes -= nr_bytes;
2854 ret = true;
2855 }
2856
2857 localtry_unlock_irqrestore(&memcg_stock.stock_lock, flags);
2858
2859 return ret;
2860 }
2861
2862 static struct obj_cgroup *drain_obj_stock(struct memcg_stock_pcp *stock)
2863 {
2864 struct obj_cgroup *old = READ_ONCE(stock->cached_objcg);
2865
2866 if (!old)
2867 return NULL;
2868
2869 if (stock->nr_bytes) {
2870 unsigned int nr_pages = stock->nr_bytes >> PAGE_SHIFT;
2871 unsigned int nr_bytes = stock->nr_bytes & (PAGE_SIZE - 1);
2872
2873 if (nr_pages) {
2874 struct mem_cgroup *memcg;
2875
2876 memcg = get_mem_cgroup_from_objcg(old);
2877
2878 mod_memcg_state(memcg, MEMCG_KMEM, -nr_pages);
2879 memcg1_account_kmem(memcg, -nr_pages);
2880 __refill_stock(memcg, nr_pages);
2881
2882 css_put(&memcg->css);
2883 }
2884
2885 /*
2886 * The leftover is flushed to the centralized per-memcg value.
2887 * On the next attempt to refill obj stock it will be moved
2888 * to a per-cpu stock (probably on another CPU), see
2889 * refill_obj_stock().
2890 *
2891 * How often it's flushed is a trade-off between the memory
2892 * limit enforcement accuracy and potential CPU contention,
2893 * so it might be changed in the future.
2894 */
2895 atomic_add(nr_bytes, &old->nr_charged_bytes);
2896 stock->nr_bytes = 0;
2897 }
2898
2899 /*
2900 * Flush the vmstat data in current stock
2901 */
2902 if (stock->nr_slab_reclaimable_b || stock->nr_slab_unreclaimable_b) {
2903 if (stock->nr_slab_reclaimable_b) {
2904 __mod_objcg_mlstate(old, stock->cached_pgdat,
2905 NR_SLAB_RECLAIMABLE_B,
2906 stock->nr_slab_reclaimable_b);
2907 stock->nr_slab_reclaimable_b = 0;
2908 }
2909 if (stock->nr_slab_unreclaimable_b) {
2910 __mod_objcg_mlstate(old, stock->cached_pgdat,
2911 NR_SLAB_UNRECLAIMABLE_B,
2912 stock->nr_slab_unreclaimable_b);
2913 stock->nr_slab_unreclaimable_b = 0;
2914 }
2915 stock->cached_pgdat = NULL;
2916 }
2917
2918 WRITE_ONCE(stock->cached_objcg, NULL);
2919 /*
2920 * The 'old' objcg needs to be released by the caller via
2921 * obj_cgroup_put() outside of memcg_stock_pcp::stock_lock.
2922 */
2923 return old;
2924 }
2925
2926 static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
2927 struct mem_cgroup *root_memcg)
2928 {
2929 struct obj_cgroup *objcg = READ_ONCE(stock->cached_objcg);
2930 struct mem_cgroup *memcg;
2931
2932 if (objcg) {
2933 memcg = obj_cgroup_memcg(objcg);
2934 if (memcg && mem_cgroup_is_descendant(memcg, root_memcg))
2935 return true;
2936 }
2937
2938 return false;
2939 }
2940
2941 static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes,
2942 bool allow_uncharge)
2943 {
2944 struct memcg_stock_pcp *stock;
2945 struct obj_cgroup *old = NULL;
2946 unsigned long flags;
2947 unsigned int nr_pages = 0;
2948
2949 localtry_lock_irqsave(&memcg_stock.stock_lock, flags);
2950
2951 stock = this_cpu_ptr(&memcg_stock);
2952 if (READ_ONCE(stock->cached_objcg) != objcg) { /* reset if necessary */
2953 old = replace_stock_objcg(stock, objcg);
2954 allow_uncharge = true; /* Allow uncharge when objcg changes */
2955 }
2956 stock->nr_bytes += nr_bytes;
2957
2958 if (allow_uncharge && (stock->nr_bytes > PAGE_SIZE)) {
2959 nr_pages = stock->nr_bytes >> PAGE_SHIFT;
2960 stock->nr_bytes &= (PAGE_SIZE - 1);
2961 }
2962
2963 localtry_unlock_irqrestore(&memcg_stock.stock_lock, flags);
2964 obj_cgroup_put(old);
2965
2966 if (nr_pages)
2967 obj_cgroup_uncharge_pages(objcg, nr_pages);
2968 }
2969
2970 int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size)
2971 {
2972 unsigned int nr_pages, nr_bytes;
2973 int ret;
2974
2975 if (consume_obj_stock(objcg, size))
2976 return 0;
2977
2978 /*
2979 * In theory, objcg->nr_charged_bytes can have enough
2980 * pre-charged bytes to satisfy the allocation. However,
2981 * flushing objcg->nr_charged_bytes requires two atomic
2982 * operations, and objcg->nr_charged_bytes can't be big.
2983 * The shared objcg->nr_charged_bytes can also become a
2984 * performance bottleneck if all tasks of the same memcg are
2985 * trying to update it. So it's better to ignore it and try
2986 * grab some new pages. The stock's nr_bytes will be flushed to
2987 * objcg->nr_charged_bytes later on when objcg changes.
2988 *
2989 * The stock's nr_bytes may contain enough pre-charged bytes
2990 * to allow one less page from being charged, but we can't rely
2991 * on the pre-charged bytes not being changed outside of
2992 * consume_obj_stock() or refill_obj_stock(). So ignore those
2993 * pre-charged bytes as well when charging pages. To avoid a
2994 * page uncharge right after a page charge, we set the
2995 * allow_uncharge flag to false when calling refill_obj_stock()
2996 * to temporarily allow the pre-charged bytes to exceed the page
2997 * size limit. The maximum reachable value of the pre-charged
2998 * bytes is (sizeof(object) + PAGE_SIZE - 2) if there is no data
2999 * race.
3000 */
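/*
 * Worked example, assuming PAGE_SIZE == 4096: a charge of size == 5000
 * gives nr_pages == 1 and nr_bytes == 904, rounded up to a two-page
 * charge below; the unused 4096 - 904 == 3192 bytes are then stashed
 * in the per-cpu stock with uncharging disallowed.
 */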
3001 nr_pages = size >> PAGE_SHIFT;
3002 nr_bytes = size & (PAGE_SIZE - 1);
3003
3004 if (nr_bytes)
3005 nr_pages += 1;
3006
3007 ret = obj_cgroup_charge_pages(objcg, gfp, nr_pages);
3008 if (!ret && nr_bytes)
3009 refill_obj_stock(objcg, PAGE_SIZE - nr_bytes, false);
3010
3011 return ret;
3012 }
3013
3014 void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size)
3015 {
3016 refill_obj_stock(objcg, size, true);
3017 }
3018
3019 static inline size_t obj_full_size(struct kmem_cache *s)
3020 {
3021 /*
3022 * For each accounted object there is an extra space which is used
3023 * to store obj_cgroup membership. Charge it too.
3024 */
3025 return s->size + sizeof(struct obj_cgroup *);
3026 }
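/*
 * E.g., on a 64-bit kernel a cache with s->size == 192 charges
 * 192 + sizeof(struct obj_cgroup *) == 200 bytes per accounted object.
 */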
3027
3028 bool __memcg_slab_post_alloc_hook(struct kmem_cache *s, struct list_lru *lru,
3029 gfp_t flags, size_t size, void **p)
3030 {
3031 struct obj_cgroup *objcg;
3032 struct slab *slab;
3033 unsigned long off;
3034 size_t i;
3035
3036 /*
3037 * The obtained objcg pointer is safe to use within the current scope,
3038 * defined by the current task or a set_active_memcg() pair.
3039 * obj_cgroup_get() is used to get a permanent reference.
3040 */
3041 objcg = current_obj_cgroup();
3042 if (!objcg)
3043 return true;
3044
3045 /*
3046 * slab_alloc_node() avoids the NULL check, so we might be called with a
3047 * single NULL object. kmem_cache_alloc_bulk() aborts if it can't fill
3048 * the whole requested size.
3049 * Return success as there's nothing to free back.
3050 */
3051 if (unlikely(*p == NULL))
3052 return true;
3053
3054 flags &= gfp_allowed_mask;
3055
3056 if (lru) {
3057 int ret;
3058 struct mem_cgroup *memcg;
3059
3060 memcg = get_mem_cgroup_from_objcg(objcg);
3061 ret = memcg_list_lru_alloc(memcg, lru, flags);
3062 css_put(&memcg->css);
3063
3064 if (ret)
3065 return false;
3066 }
3067
3068 if (obj_cgroup_charge(objcg, flags, size * obj_full_size(s)))
3069 return false;
3070
3071 for (i = 0; i < size; i++) {
3072 slab = virt_to_slab(p[i]);
3073
3074 if (!slab_obj_exts(slab) &&
3075 alloc_slab_obj_exts(slab, s, flags, false)) {
3076 obj_cgroup_uncharge(objcg, obj_full_size(s));
3077 continue;
3078 }
3079
3080 off = obj_to_index(s, slab, p[i]);
3081 obj_cgroup_get(objcg);
3082 slab_obj_exts(slab)[off].objcg = objcg;
3083 mod_objcg_state(objcg, slab_pgdat(slab),
3084 cache_vmstat_idx(s), obj_full_size(s));
3085 }
3086
3087 return true;
3088 }
3089
3090 void __memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
3091 void **p, int objects, struct slabobj_ext *obj_exts)
3092 {
3093 for (int i = 0; i < objects; i++) {
3094 struct obj_cgroup *objcg;
3095 unsigned int off;
3096
3097 off = obj_to_index(s, slab, p[i]);
3098 objcg = obj_exts[off].objcg;
3099 if (!objcg)
3100 continue;
3101
3102 obj_exts[off].objcg = NULL;
3103 obj_cgroup_uncharge(objcg, obj_full_size(s));
3104 mod_objcg_state(objcg, slab_pgdat(slab), cache_vmstat_idx(s),
3105 -obj_full_size(s));
3106 obj_cgroup_put(objcg);
3107 }
3108 }
3109
3110 /*
3111 * The objcg is only set on the first page, so transfer it to all the
3112 * other pages.
3113 */
3114 void split_page_memcg(struct page *page, unsigned order)
3115 {
3116 struct obj_cgroup *objcg = page_objcg(page);
3117 unsigned int i, nr = 1 << order;
3118
3119 if (!objcg)
3120 return;
3121
3122 for (i = 1; i < nr; i++)
3123 page_set_objcg(&page[i], objcg);
3124
3125 obj_cgroup_get_many(objcg, nr - 1);
3126 }
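/*
 * E.g., splitting an order-3 kmem page into eight order-0 pages: the
 * seven tail pages each inherit the head page's objcg pointer, and
 * seven extra objcg references are taken to cover them.
 */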
3127
3128 void folio_split_memcg_refs(struct folio *folio, unsigned old_order,
3129 unsigned new_order)
3130 {
3131 unsigned new_refs;
3132
3133 if (mem_cgroup_disabled() || !folio_memcg_charged(folio))
3134 return;
3135
3136 new_refs = (1 << (old_order - new_order)) - 1;
3137 css_get_many(&__folio_memcg(folio)->css, new_refs);
3138 }
3139
3140 unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
3141 {
3142 unsigned long val;
3143
3144 if (mem_cgroup_is_root(memcg)) {
3145 /*
3146 * Approximate root's usage from global state. This isn't
3147 * perfect, but the root usage was always an approximation.
3148 */
3149 val = global_node_page_state(NR_FILE_PAGES) +
3150 global_node_page_state(NR_ANON_MAPPED);
3151 if (swap)
3152 val += total_swap_pages - get_nr_swap_pages();
3153 } else {
3154 if (!swap)
3155 val = page_counter_read(&memcg->memory);
3156 else
3157 val = page_counter_read(&memcg->memsw);
3158 }
3159 return val;
3160 }
3161
3162 static int memcg_online_kmem(struct mem_cgroup *memcg)
3163 {
3164 struct obj_cgroup *objcg;
3165
3166 if (mem_cgroup_kmem_disabled())
3167 return 0;
3168
3169 if (unlikely(mem_cgroup_is_root(memcg)))
3170 return 0;
3171
3172 objcg = obj_cgroup_alloc();
3173 if (!objcg)
3174 return -ENOMEM;
3175
3176 objcg->memcg = memcg;
3177 rcu_assign_pointer(memcg->objcg, objcg);
3178 obj_cgroup_get(objcg);
3179 memcg->orig_objcg = objcg;
3180
3181 static_branch_enable(&memcg_kmem_online_key);
3182
3183 memcg->kmemcg_id = memcg->id.id;
3184
3185 return 0;
3186 }
3187
3188 static void memcg_offline_kmem(struct mem_cgroup *memcg)
3189 {
3190 struct mem_cgroup *parent;
3191
3192 if (mem_cgroup_kmem_disabled())
3193 return;
3194
3195 if (unlikely(mem_cgroup_is_root(memcg)))
3196 return;
3197
3198 parent = parent_mem_cgroup(memcg);
3199 if (!parent)
3200 parent = root_mem_cgroup;
3201
3202 memcg_reparent_list_lrus(memcg, parent);
3203
3204 /*
3205 * The objcg's reparenting must happen after the list_lru's, to make sure
3206 * list_lru helpers won't use the parent's list_lru until the child is drained.
3207 */
3208 memcg_reparent_objcgs(memcg, parent);
3209 }
3210
3211 #ifdef CONFIG_CGROUP_WRITEBACK
3212
3213 #include <trace/events/writeback.h>
3214
3215 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
3216 {
3217 return wb_domain_init(&memcg->cgwb_domain, gfp);
3218 }
3219
3220 static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
3221 {
3222 wb_domain_exit(&memcg->cgwb_domain);
3223 }
3224
3225 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
3226 {
3227 wb_domain_size_changed(&memcg->cgwb_domain);
3228 }
3229
3230 struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
3231 {
3232 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
3233
3234 if (!memcg->css.parent)
3235 return NULL;
3236
3237 return &memcg->cgwb_domain;
3238 }
3239
3240 /**
3241 * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg
3242 * @wb: bdi_writeback in question
3243 * @pfilepages: out parameter for number of file pages
3244 * @pheadroom: out parameter for number of allocatable pages according to memcg
3245 * @pdirty: out parameter for number of dirty pages
3246 * @pwriteback: out parameter for number of pages under writeback
3247 *
3248 * Determine the numbers of file, headroom, dirty, and writeback pages in
3249 * @wb's memcg. File, dirty and writeback are self-explanatory. Headroom
3250 * is a bit more involved.
3251 *
3252 * A memcg's headroom is "min(max, high) - used". In the hierarchy, the
3253 * headroom is calculated as the lowest headroom of itself and the
3254 * ancestors. Note that this doesn't consider the actual amount of
3255 * available memory in the system. The caller should further cap
3256 * *@pheadroom accordingly.
3257 */
3258 void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
3259 unsigned long *pheadroom, unsigned long *pdirty,
3260 unsigned long *pwriteback)
3261 {
3262 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
3263 struct mem_cgroup *parent;
3264
3265 mem_cgroup_flush_stats_ratelimited(memcg);
3266
3267 *pdirty = memcg_page_state(memcg, NR_FILE_DIRTY);
3268 *pwriteback = memcg_page_state(memcg, NR_WRITEBACK);
3269 *pfilepages = memcg_page_state(memcg, NR_INACTIVE_FILE) +
3270 memcg_page_state(memcg, NR_ACTIVE_FILE);
3271
3272 *pheadroom = PAGE_COUNTER_MAX;
3273 while ((parent = parent_mem_cgroup(memcg))) {
3274 unsigned long ceiling = min(READ_ONCE(memcg->memory.max),
3275 READ_ONCE(memcg->memory.high));
3276 unsigned long used = page_counter_read(&memcg->memory);
3277
3278 *pheadroom = min(*pheadroom, ceiling - min(ceiling, used));
3279 memcg = parent;
3280 }
3281 }
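/*
 * Worked example for the headroom rule above, with hypothetical
 * numbers: a memcg with high == 512M, no max and 300M used has 212M of
 * headroom; if a non-root ancestor has high == 2G with 1.9G used, that
 * ancestor contributes only 100M, so *pheadroom ends up as 100M (which
 * the caller may still cap against actually available memory).
 */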
3282
3283 /*
3284 * Foreign dirty flushing
3285 *
3286 * There's an inherent mismatch between memcg and writeback. The former
3287 * tracks ownership per-page while the latter per-inode. This was a
3288 * deliberate design decision because honoring per-page ownership in the
3289 * writeback path is complicated, may lead to higher CPU and IO overheads
3290 * and deemed unnecessary given that write-sharing an inode across
3291 * different cgroups isn't a common use-case.
3292 *
3293 * Combined with inode majority-writer ownership switching, this works well
3294 * enough in most cases but there are some pathological cases. For
3295 * example, let's say there are two cgroups A and B which keep writing to
3296 * different but confined parts of the same inode. B owns the inode and
3297 * A's memory is limited far below B's. A's dirty ratio can rise enough to
3298 * trigger balance_dirty_pages() sleeps but B's can be low enough to avoid
3299 * triggering background writeback. A will be slowed down without a way to
3300 * make writeback of the dirty pages happen.
3301 *
3302 * Conditions like the above can lead to a cgroup getting repeatedly and
3303 * severely throttled after making some progress after each
3304 * dirty_expire_interval while the underlying IO device is almost
3305 * completely idle.
3306 *
3307 * Solving this problem completely requires matching the ownership tracking
3308 * granularities between memcg and writeback in either direction. However,
3309 * the more egregious behaviors can be avoided by simply remembering the
3310 * most recent foreign dirtying events and initiating remote flushes on
3311 * them when local writeback isn't enough to keep the memory clean enough.
3312 *
3313 * The following two functions implement such mechanism. When a foreign
3314 * page - a page whose memcg and writeback ownerships don't match - is
3315 * dirtied, mem_cgroup_track_foreign_dirty() records the inode owning
3316 * bdi_writeback on the page owning memcg. When balance_dirty_pages()
3317 * decides that the memcg needs to sleep due to high dirty ratio, it calls
3318 * mem_cgroup_flush_foreign() which queues writeback on the recorded
3319 * foreign bdi_writebacks which haven't expired. Both the numbers of
3320 * recorded bdi_writebacks and concurrent in-flight foreign writebacks are
3321 * limited to MEMCG_CGWB_FRN_CNT.
3322 *
3323 * The mechanism only remembers IDs and doesn't hold any object references.
3324 * As being wrong occasionally doesn't matter, updates and accesses to the
3325 * records are lockless and racy.
3326 */
3327 void mem_cgroup_track_foreign_dirty_slowpath(struct folio *folio,
3328 struct bdi_writeback *wb)
3329 {
3330 struct mem_cgroup *memcg = folio_memcg(folio);
3331 struct memcg_cgwb_frn *frn;
3332 u64 now = get_jiffies_64();
3333 u64 oldest_at = now;
3334 int oldest = -1;
3335 int i;
3336
3337 trace_track_foreign_dirty(folio, wb);
3338
3339 /*
3340 * Pick the slot to use. If there is already a slot for @wb, keep
3341 * using it. If not replace the oldest one which isn't being
3342 * written out.
3343 */
3344 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) {
3345 frn = &memcg->cgwb_frn[i];
3346 if (frn->bdi_id == wb->bdi->id &&
3347 frn->memcg_id == wb->memcg_css->id)
3348 break;
3349 if (time_before64(frn->at, oldest_at) &&
3350 atomic_read(&frn->done.cnt) == 1) {
3351 oldest = i;
3352 oldest_at = frn->at;
3353 }
3354 }
3355
3356 if (i < MEMCG_CGWB_FRN_CNT) {
3357 /*
3358 * Re-using an existing one. Update timestamp lazily to
3359 * avoid making the cacheline hot. We want them to be
3360 * reasonably up-to-date and significantly shorter than
3361 * dirty_expire_interval as that's what expires the record.
3362 * Use the shorter of 1s and dirty_expire_interval / 8.
3363 */
3364 unsigned long update_intv =
3365 min_t(unsigned long, HZ,
3366 msecs_to_jiffies(dirty_expire_interval * 10) / 8);
3367
3368 if (time_before64(frn->at, now - update_intv))
3369 frn->at = now;
3370 } else if (oldest >= 0) {
3371 /* replace the oldest free one */
3372 frn = &memcg->cgwb_frn[oldest];
3373 frn->bdi_id = wb->bdi->id;
3374 frn->memcg_id = wb->memcg_css->id;
3375 frn->at = now;
3376 }
3377 }
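/*
 * With the default dirty_expire_interval of 3000 centisecs (30s), the
 * update interval above works out to min(1s, 30s / 8) == 1s, and the
 * expiry interval used by mem_cgroup_flush_foreign() below is the
 * full 30s.
 */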
3378
3379 /* issue foreign writeback flushes for recorded foreign dirtying events */
3380 void mem_cgroup_flush_foreign(struct bdi_writeback *wb)
3381 {
3382 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
3383 unsigned long intv = msecs_to_jiffies(dirty_expire_interval * 10);
3384 u64 now = jiffies_64;
3385 int i;
3386
3387 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) {
3388 struct memcg_cgwb_frn *frn = &memcg->cgwb_frn[i];
3389
3390 /*
3391 * If the record is older than dirty_expire_interval,
3392 * writeback on it has already started. No need to kick it
3393 * off again. Also, don't start a new one if there's
3394 * already one in flight.
3395 */
3396 if (time_after64(frn->at, now - intv) &&
3397 atomic_read(&frn->done.cnt) == 1) {
3398 frn->at = 0;
3399 trace_flush_foreign(wb, frn->bdi_id, frn->memcg_id);
3400 cgroup_writeback_by_id(frn->bdi_id, frn->memcg_id,
3401 WB_REASON_FOREIGN_FLUSH,
3402 &frn->done);
3403 }
3404 }
3405 }
3406
3407 #else /* CONFIG_CGROUP_WRITEBACK */
3408
3409 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
3410 {
3411 return 0;
3412 }
3413
3414 static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
3415 {
3416 }
3417
3418 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
3419 {
3420 }
3421
3422 #endif /* CONFIG_CGROUP_WRITEBACK */
3423
3424 /*
3425 * Private memory cgroup IDR
3426 *
3427 * Swap-out records and page cache shadow entries need to store memcg
3428 * references in constrained space, so we maintain an ID space that is
3429 * limited to 16 bits (MEM_CGROUP_ID_MAX), limiting the total number of
3430 * memory-controlled cgroups to 64k.
3431 *
3432 * However, there usually are many references to the offline CSS after
3433 * the cgroup has been destroyed, such as page cache or reclaimable
3434 * slab objects, that don't need to hang on to the ID. We want to keep
3435 * those dead CSS from occupying IDs, or we might quickly exhaust the
3436 * relatively small ID space and prevent the creation of new cgroups
3437 * even when there are much fewer than 64k cgroups - possibly none.
3438 *
3439 * Maintain a private 16-bit ID space for memcg, and allow the ID to
3440 * be freed and recycled when it's no longer needed, which is usually
3441 * when the CSS is offlined.
3442 *
3443 * The only exceptions to that are records of swapped out tmpfs/shmem
3444 * pages that need to be attributed to live ancestors on swapin. But
3445 * those references are manageable from userspace.
3446 */
3447
3448 #define MEM_CGROUP_ID_MAX ((1UL << MEM_CGROUP_ID_SHIFT) - 1)
3449 static DEFINE_XARRAY_ALLOC1(mem_cgroup_ids);
3450
3451 static void mem_cgroup_id_remove(struct mem_cgroup *memcg)
3452 {
3453 if (memcg->id.id > 0) {
3454 xa_erase(&mem_cgroup_ids, memcg->id.id);
3455 memcg->id.id = 0;
3456 }
3457 }
3458
3459 void __maybe_unused mem_cgroup_id_get_many(struct mem_cgroup *memcg,
3460 unsigned int n)
3461 {
3462 refcount_add(n, &memcg->id.ref);
3463 }
3464
3465 static void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n)
3466 {
3467 if (refcount_sub_and_test(n, &memcg->id.ref)) {
3468 mem_cgroup_id_remove(memcg);
3469
3470 /* Memcg ID pins CSS */
3471 css_put(&memcg->css);
3472 }
3473 }
3474
3475 static inline void mem_cgroup_id_put(struct mem_cgroup *memcg)
3476 {
3477 mem_cgroup_id_put_many(memcg, 1);
3478 }
3479
3480 struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg)
3481 {
3482 while (!refcount_inc_not_zero(&memcg->id.ref)) {
3483 /*
3484 * The root cgroup cannot be destroyed, so its refcount must
3485 * always be >= 1.
3486 */
3487 if (WARN_ON_ONCE(mem_cgroup_is_root(memcg))) {
3488 VM_BUG_ON(1);
3489 break;
3490 }
3491 memcg = parent_mem_cgroup(memcg);
3492 if (!memcg)
3493 memcg = root_mem_cgroup;
3494 }
3495 return memcg;
3496 }
3497
3498 /**
3499 * mem_cgroup_from_id - look up a memcg from a memcg id
3500 * @id: the memcg id to look up
3501 *
3502 * Caller must hold rcu_read_lock().
3503 */
mem_cgroup_from_id(unsigned short id)3504 struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
3505 {
3506 WARN_ON_ONCE(!rcu_read_lock_held());
3507 return xa_load(&mem_cgroup_ids, id);
3508 }
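/*
 * The usual lookup pattern (a sketch mirroring the swapin charge path
 * below): resolve the ID under RCU and only use the result after
 * taking a CSS reference, falling back if the group has gone away:
 *
 *	rcu_read_lock();
 *	memcg = mem_cgroup_from_id(id);
 *	if (!memcg || !css_tryget_online(&memcg->css))
 *		memcg = get_mem_cgroup_from_mm(mm);
 *	rcu_read_unlock();
 */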
3509
3510 #ifdef CONFIG_SHRINKER_DEBUG
mem_cgroup_get_from_ino(unsigned long ino)3511 struct mem_cgroup *mem_cgroup_get_from_ino(unsigned long ino)
3512 {
3513 struct cgroup *cgrp;
3514 struct cgroup_subsys_state *css;
3515 struct mem_cgroup *memcg;
3516
3517 cgrp = cgroup_get_from_id(ino);
3518 if (IS_ERR(cgrp))
3519 return ERR_CAST(cgrp);
3520
3521 css = cgroup_get_e_css(cgrp, &memory_cgrp_subsys);
3522 if (css)
3523 memcg = container_of(css, struct mem_cgroup, css);
3524 else
3525 memcg = ERR_PTR(-ENOENT);
3526
3527 cgroup_put(cgrp);
3528
3529 return memcg;
3530 }
3531 #endif
3532
free_mem_cgroup_per_node_info(struct mem_cgroup_per_node * pn)3533 static void free_mem_cgroup_per_node_info(struct mem_cgroup_per_node *pn)
3534 {
3535 if (!pn)
3536 return;
3537
3538 free_percpu(pn->lruvec_stats_percpu);
3539 kfree(pn->lruvec_stats);
3540 kfree(pn);
3541 }
3542
alloc_mem_cgroup_per_node_info(struct mem_cgroup * memcg,int node)3543 static bool alloc_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
3544 {
3545 struct mem_cgroup_per_node *pn;
3546
3547 pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, node);
3548 if (!pn)
3549 return false;
3550
3551 pn->lruvec_stats = kzalloc_node(sizeof(struct lruvec_stats),
3552 GFP_KERNEL_ACCOUNT, node);
3553 if (!pn->lruvec_stats)
3554 goto fail;
3555
3556 pn->lruvec_stats_percpu = alloc_percpu_gfp(struct lruvec_stats_percpu,
3557 GFP_KERNEL_ACCOUNT);
3558 if (!pn->lruvec_stats_percpu)
3559 goto fail;
3560
3561 lruvec_init(&pn->lruvec);
3562 pn->memcg = memcg;
3563
3564 memcg->nodeinfo[node] = pn;
3565 return true;
3566 fail:
3567 free_mem_cgroup_per_node_info(pn);
3568 return false;
3569 }
3570
__mem_cgroup_free(struct mem_cgroup * memcg)3571 static void __mem_cgroup_free(struct mem_cgroup *memcg)
3572 {
3573 int node;
3574
3575 obj_cgroup_put(memcg->orig_objcg);
3576
3577 for_each_node(node)
3578 free_mem_cgroup_per_node_info(memcg->nodeinfo[node]);
3579 memcg1_free_events(memcg);
3580 kfree(memcg->vmstats);
3581 free_percpu(memcg->vmstats_percpu);
3582 kfree(memcg);
3583 }
3584
mem_cgroup_free(struct mem_cgroup * memcg)3585 static void mem_cgroup_free(struct mem_cgroup *memcg)
3586 {
3587 lru_gen_exit_memcg(memcg);
3588 memcg_wb_domain_exit(memcg);
3589 __mem_cgroup_free(memcg);
3590 }
3591
mem_cgroup_alloc(struct mem_cgroup * parent)3592 static struct mem_cgroup *mem_cgroup_alloc(struct mem_cgroup *parent)
3593 {
3594 struct memcg_vmstats_percpu *statc, *pstatc;
3595 struct mem_cgroup *memcg;
3596 int node, cpu;
3597 int __maybe_unused i;
3598 long error;
3599
3600 memcg = kzalloc(struct_size(memcg, nodeinfo, nr_node_ids), GFP_KERNEL);
3601 if (!memcg)
3602 return ERR_PTR(-ENOMEM);
3603
3604 error = xa_alloc(&mem_cgroup_ids, &memcg->id.id, NULL,
3605 XA_LIMIT(1, MEM_CGROUP_ID_MAX), GFP_KERNEL);
3606 if (error)
3607 goto fail;
3608 error = -ENOMEM;
3609
3610 memcg->vmstats = kzalloc(sizeof(struct memcg_vmstats),
3611 GFP_KERNEL_ACCOUNT);
3612 if (!memcg->vmstats)
3613 goto fail;
3614
3615 memcg->vmstats_percpu = alloc_percpu_gfp(struct memcg_vmstats_percpu,
3616 GFP_KERNEL_ACCOUNT);
3617 if (!memcg->vmstats_percpu)
3618 goto fail;
3619
3620 if (!memcg1_alloc_events(memcg))
3621 goto fail;
3622
3623 for_each_possible_cpu(cpu) {
3624 if (parent)
3625 pstatc = per_cpu_ptr(parent->vmstats_percpu, cpu);
3626 statc = per_cpu_ptr(memcg->vmstats_percpu, cpu);
3627 statc->parent = parent ? pstatc : NULL;
3628 statc->vmstats = memcg->vmstats;
3629 }
3630
3631 for_each_node(node)
3632 if (!alloc_mem_cgroup_per_node_info(memcg, node))
3633 goto fail;
3634
3635 if (memcg_wb_domain_init(memcg, GFP_KERNEL))
3636 goto fail;
3637
3638 INIT_WORK(&memcg->high_work, high_work_func);
3639 vmpressure_init(&memcg->vmpressure);
3640 INIT_LIST_HEAD(&memcg->memory_peaks);
3641 INIT_LIST_HEAD(&memcg->swap_peaks);
3642 spin_lock_init(&memcg->peaks_lock);
3643 memcg->socket_pressure = jiffies;
3644 memcg1_memcg_init(memcg);
3645 memcg->kmemcg_id = -1;
3646 INIT_LIST_HEAD(&memcg->objcg_list);
3647 #ifdef CONFIG_CGROUP_WRITEBACK
3648 INIT_LIST_HEAD(&memcg->cgwb_list);
3649 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++)
3650 memcg->cgwb_frn[i].done =
3651 __WB_COMPLETION_INIT(&memcg_cgwb_frn_waitq);
3652 #endif
3653 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
3654 spin_lock_init(&memcg->deferred_split_queue.split_queue_lock);
3655 INIT_LIST_HEAD(&memcg->deferred_split_queue.split_queue);
3656 memcg->deferred_split_queue.split_queue_len = 0;
3657 #endif
3658 lru_gen_init_memcg(memcg);
3659 return memcg;
3660 fail:
3661 mem_cgroup_id_remove(memcg);
3662 __mem_cgroup_free(memcg);
3663 return ERR_PTR(error);
3664 }
3665
3666 static struct cgroup_subsys_state * __ref
mem_cgroup_css_alloc(struct cgroup_subsys_state * parent_css)3667 mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
3668 {
3669 struct mem_cgroup *parent = mem_cgroup_from_css(parent_css);
3670 struct mem_cgroup *memcg, *old_memcg;
3671 bool memcg_on_dfl = cgroup_subsys_on_dfl(memory_cgrp_subsys);
3672
3673 old_memcg = set_active_memcg(parent);
3674 memcg = mem_cgroup_alloc(parent);
3675 set_active_memcg(old_memcg);
3676 if (IS_ERR(memcg))
3677 return ERR_CAST(memcg);
3678
3679 page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX);
3680 memcg1_soft_limit_reset(memcg);
3681 #ifdef CONFIG_ZSWAP
3682 memcg->zswap_max = PAGE_COUNTER_MAX;
3683 WRITE_ONCE(memcg->zswap_writeback, true);
3684 #endif
3685 page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX);
3686 if (parent) {
3687 WRITE_ONCE(memcg->swappiness, mem_cgroup_swappiness(parent));
3688
3689 page_counter_init(&memcg->memory, &parent->memory, memcg_on_dfl);
3690 page_counter_init(&memcg->swap, &parent->swap, false);
3691 #ifdef CONFIG_MEMCG_V1
3692 memcg->memory.track_failcnt = !memcg_on_dfl;
3693 WRITE_ONCE(memcg->oom_kill_disable, READ_ONCE(parent->oom_kill_disable));
3694 page_counter_init(&memcg->kmem, &parent->kmem, false);
3695 page_counter_init(&memcg->tcpmem, &parent->tcpmem, false);
3696 #endif
3697 } else {
3698 init_memcg_stats();
3699 init_memcg_events();
3700 page_counter_init(&memcg->memory, NULL, true);
3701 page_counter_init(&memcg->swap, NULL, false);
3702 #ifdef CONFIG_MEMCG_V1
3703 page_counter_init(&memcg->kmem, NULL, false);
3704 page_counter_init(&memcg->tcpmem, NULL, false);
3705 #endif
3706 root_mem_cgroup = memcg;
3707 return &memcg->css;
3708 }
3709
3710 if (memcg_on_dfl && !cgroup_memory_nosocket)
3711 static_branch_inc(&memcg_sockets_enabled_key);
3712
3713 if (!cgroup_memory_nobpf)
3714 static_branch_inc(&memcg_bpf_enabled_key);
3715
3716 return &memcg->css;
3717 }
3718
mem_cgroup_css_online(struct cgroup_subsys_state * css)3719 static int mem_cgroup_css_online(struct cgroup_subsys_state *css)
3720 {
3721 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3722
3723 if (memcg_online_kmem(memcg))
3724 goto remove_id;
3725
3726 /*
3727 * A memcg must be visible to expand_shrinker_info()
3728 * by the time the maps are allocated. So, we allocate the maps
3729 * here, when for_each_mem_cgroup() can't skip it.
3730 */
3731 if (alloc_shrinker_info(memcg))
3732 goto offline_kmem;
3733
3734 if (unlikely(mem_cgroup_is_root(memcg)) && !mem_cgroup_disabled())
3735 queue_delayed_work(system_unbound_wq, &stats_flush_dwork,
3736 FLUSH_TIME);
3737 lru_gen_online_memcg(memcg);
3738
3739 /* Online state pins memcg ID, memcg ID pins CSS */
3740 refcount_set(&memcg->id.ref, 1);
3741 css_get(css);
3742
3743 /*
3744 * Ensure mem_cgroup_from_id() works once we're fully online.
3745 *
3746 * We could do this earlier and require callers to filter with
3747 * css_tryget_online(). But right now there are no users that
3748 * need earlier access, and the workingset code relies on the
3749 * cgroup tree linkage (mem_cgroup_get_nr_swap_pages()). So
3750 * publish it here at the end of onlining. This matches the
3751 * regular ID destruction during offlining.
3752 */
3753 xa_store(&mem_cgroup_ids, memcg->id.id, memcg, GFP_KERNEL);
3754
3755 return 0;
3756 offline_kmem:
3757 memcg_offline_kmem(memcg);
3758 remove_id:
3759 mem_cgroup_id_remove(memcg);
3760 return -ENOMEM;
3761 }
3762
mem_cgroup_css_offline(struct cgroup_subsys_state * css)3763 static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
3764 {
3765 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3766
3767 memcg1_css_offline(memcg);
3768
3769 page_counter_set_min(&memcg->memory, 0);
3770 page_counter_set_low(&memcg->memory, 0);
3771
3772 zswap_memcg_offline_cleanup(memcg);
3773
3774 memcg_offline_kmem(memcg);
3775 reparent_shrinker_deferred(memcg);
3776 wb_memcg_offline(memcg);
3777 lru_gen_offline_memcg(memcg);
3778
3779 drain_all_stock(memcg);
3780
3781 mem_cgroup_id_put(memcg);
3782 }
3783
mem_cgroup_css_released(struct cgroup_subsys_state * css)3784 static void mem_cgroup_css_released(struct cgroup_subsys_state *css)
3785 {
3786 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3787
3788 invalidate_reclaim_iterators(memcg);
3789 lru_gen_release_memcg(memcg);
3790 }
3791
mem_cgroup_css_free(struct cgroup_subsys_state * css)3792 static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
3793 {
3794 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3795 int __maybe_unused i;
3796
3797 #ifdef CONFIG_CGROUP_WRITEBACK
3798 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++)
3799 wb_wait_for_completion(&memcg->cgwb_frn[i].done);
3800 #endif
3801 if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
3802 static_branch_dec(&memcg_sockets_enabled_key);
3803
3804 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg1_tcpmem_active(memcg))
3805 static_branch_dec(&memcg_sockets_enabled_key);
3806
3807 if (!cgroup_memory_nobpf)
3808 static_branch_dec(&memcg_bpf_enabled_key);
3809
3810 vmpressure_cleanup(&memcg->vmpressure);
3811 cancel_work_sync(&memcg->high_work);
3812 memcg1_remove_from_trees(memcg);
3813 free_shrinker_info(memcg);
3814 mem_cgroup_free(memcg);
3815 }
3816
3817 /**
3818 * mem_cgroup_css_reset - reset the states of a mem_cgroup
3819 * @css: the target css
3820 *
3821 * Reset the states of the mem_cgroup associated with @css. This is
3822 * invoked when the userland requests disabling on the default hierarchy
3823 * but the memcg is pinned through dependency. The memcg should stop
3824 * applying policies and should revert to the vanilla state as it may be
3825 * made visible again.
3826 *
3827 * The current implementation only resets the essential configurations.
3828 * This needs to be expanded to cover all the visible parts.
3829 */
mem_cgroup_css_reset(struct cgroup_subsys_state * css)3830 static void mem_cgroup_css_reset(struct cgroup_subsys_state *css)
3831 {
3832 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3833
3834 page_counter_set_max(&memcg->memory, PAGE_COUNTER_MAX);
3835 page_counter_set_max(&memcg->swap, PAGE_COUNTER_MAX);
3836 #ifdef CONFIG_MEMCG_V1
3837 page_counter_set_max(&memcg->kmem, PAGE_COUNTER_MAX);
3838 page_counter_set_max(&memcg->tcpmem, PAGE_COUNTER_MAX);
3839 #endif
3840 page_counter_set_min(&memcg->memory, 0);
3841 page_counter_set_low(&memcg->memory, 0);
3842 page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX);
3843 memcg1_soft_limit_reset(memcg);
3844 page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX);
3845 memcg_wb_domain_size_changed(memcg);
3846 }
3847
3848 struct aggregate_control {
3849 /* pointer to the aggregated (CPU and subtree aggregated) counters */
3850 long *aggregate;
3851 /* pointer to the non-hierarchical (CPU aggregated) counters */
3852 long *local;
3853 /* pointer to the pending child counters during tree propagation */
3854 long *pending;
3855 /* pointer to the parent's pending counters, could be NULL */
3856 long *ppending;
3857 /* pointer to the percpu counters to be aggregated */
3858 long *cstat;
3859 /* pointer to the percpu counters of the last aggregation */
3860 long *cstat_prev;
3861 /* size of the above counters */
3862 int size;
3863 };
3864
mem_cgroup_stat_aggregate(struct aggregate_control * ac)3865 static void mem_cgroup_stat_aggregate(struct aggregate_control *ac)
3866 {
3867 int i;
3868 long delta, delta_cpu, v;
3869
3870 for (i = 0; i < ac->size; i++) {
3871 /*
3872 * Collect the aggregated propagation counts of groups
3873 * below us. We're in a per-cpu loop here and this is
3874 * a global counter, so the first cycle will get them.
3875 */
3876 delta = ac->pending[i];
3877 if (delta)
3878 ac->pending[i] = 0;
3879
3880 /* Add CPU changes on this level since the last flush */
3881 delta_cpu = 0;
3882 v = READ_ONCE(ac->cstat[i]);
3883 if (v != ac->cstat_prev[i]) {
3884 delta_cpu = v - ac->cstat_prev[i];
3885 delta += delta_cpu;
3886 ac->cstat_prev[i] = v;
3887 }
3888
3889 /* Aggregate counts on this level and propagate upwards */
3890 if (delta_cpu)
3891 ac->local[i] += delta_cpu;
3892
3893 if (delta) {
3894 ac->aggregate[i] += delta;
3895 if (ac->ppending)
3896 ac->ppending[i] += delta;
3897 }
3898 }
3899 }
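/*
 * Worked example for one counter slot i during a flush on one CPU
 * (numbers are illustrative): suppose the children already propagated
 * pending[i] = 5 and this CPU's counter moved from cstat_prev[i] = 2
 * to cstat[i] = 6. Then delta_cpu = 4 and delta = 5 + 4 = 9: local[i]
 * grows by 4 (own, non-hierarchical changes only), aggregate[i] grows
 * by 9, pending[i] is cleared, and the parent's pending[i] grows by 9
 * to be picked up when the parent itself is flushed.
 */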
3900
mem_cgroup_css_rstat_flush(struct cgroup_subsys_state * css,int cpu)3901 static void mem_cgroup_css_rstat_flush(struct cgroup_subsys_state *css, int cpu)
3902 {
3903 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3904 struct mem_cgroup *parent = parent_mem_cgroup(memcg);
3905 struct memcg_vmstats_percpu *statc;
3906 struct aggregate_control ac;
3907 int nid;
3908
3909 statc = per_cpu_ptr(memcg->vmstats_percpu, cpu);
3910
3911 ac = (struct aggregate_control) {
3912 .aggregate = memcg->vmstats->state,
3913 .local = memcg->vmstats->state_local,
3914 .pending = memcg->vmstats->state_pending,
3915 .ppending = parent ? parent->vmstats->state_pending : NULL,
3916 .cstat = statc->state,
3917 .cstat_prev = statc->state_prev,
3918 .size = MEMCG_VMSTAT_SIZE,
3919 };
3920 mem_cgroup_stat_aggregate(&ac);
3921
3922 ac = (struct aggregate_control) {
3923 .aggregate = memcg->vmstats->events,
3924 .local = memcg->vmstats->events_local,
3925 .pending = memcg->vmstats->events_pending,
3926 .ppending = parent ? parent->vmstats->events_pending : NULL,
3927 .cstat = statc->events,
3928 .cstat_prev = statc->events_prev,
3929 .size = NR_MEMCG_EVENTS,
3930 };
3931 mem_cgroup_stat_aggregate(&ac);
3932
3933 for_each_node_state(nid, N_MEMORY) {
3934 struct mem_cgroup_per_node *pn = memcg->nodeinfo[nid];
3935 struct lruvec_stats *lstats = pn->lruvec_stats;
3936 struct lruvec_stats *plstats = NULL;
3937 struct lruvec_stats_percpu *lstatc;
3938
3939 if (parent)
3940 plstats = parent->nodeinfo[nid]->lruvec_stats;
3941
3942 lstatc = per_cpu_ptr(pn->lruvec_stats_percpu, cpu);
3943
3944 ac = (struct aggregate_control) {
3945 .aggregate = lstats->state,
3946 .local = lstats->state_local,
3947 .pending = lstats->state_pending,
3948 .ppending = plstats ? plstats->state_pending : NULL,
3949 .cstat = lstatc->state,
3950 .cstat_prev = lstatc->state_prev,
3951 .size = NR_MEMCG_NODE_STAT_ITEMS,
3952 };
3953 mem_cgroup_stat_aggregate(&ac);
3954
3955 }
3956 WRITE_ONCE(statc->stats_updates, 0);
3957 /* We are in a per-cpu loop here, only do the atomic write once */
3958 if (atomic64_read(&memcg->vmstats->stats_updates))
3959 atomic64_set(&memcg->vmstats->stats_updates, 0);
3960 }
3961
mem_cgroup_fork(struct task_struct * task)3962 static void mem_cgroup_fork(struct task_struct *task)
3963 {
3964 /*
3965 * Set the update flag to cause task->objcg to be initialized lazily
3966 * on the first allocation. It can be done without any synchronization
3967 * because it's always performed on the current task, as is
3968 * current_objcg_update().
3969 */
3970 task->objcg = (struct obj_cgroup *)CURRENT_OBJCG_UPDATE_FLAG;
3971 }
3972
mem_cgroup_exit(struct task_struct * task)3973 static void mem_cgroup_exit(struct task_struct *task)
3974 {
3975 struct obj_cgroup *objcg = task->objcg;
3976
3977 objcg = (struct obj_cgroup *)
3978 ((unsigned long)objcg & ~CURRENT_OBJCG_UPDATE_FLAG);
3979 obj_cgroup_put(objcg);
3980
3981 /*
3982 * Some kernel allocations can happen after this point,
3983 * but let's ignore them. It can be done without any synchronization
3984 * because it's always performed on the current task, as is
3985 * current_objcg_update().
3986 */
3987 task->objcg = NULL;
3988 }
3989
3990 #ifdef CONFIG_LRU_GEN
mem_cgroup_lru_gen_attach(struct cgroup_taskset * tset)3991 static void mem_cgroup_lru_gen_attach(struct cgroup_taskset *tset)
3992 {
3993 struct task_struct *task;
3994 struct cgroup_subsys_state *css;
3995
3996 /* find the first leader if there is any */
3997 cgroup_taskset_for_each_leader(task, css, tset)
3998 break;
3999
4000 if (!task)
4001 return;
4002
4003 task_lock(task);
4004 if (task->mm && READ_ONCE(task->mm->owner) == task)
4005 lru_gen_migrate_mm(task->mm);
4006 task_unlock(task);
4007 }
4008 #else
mem_cgroup_lru_gen_attach(struct cgroup_taskset * tset)4009 static void mem_cgroup_lru_gen_attach(struct cgroup_taskset *tset) {}
4010 #endif /* CONFIG_LRU_GEN */
4011
mem_cgroup_kmem_attach(struct cgroup_taskset * tset)4012 static void mem_cgroup_kmem_attach(struct cgroup_taskset *tset)
4013 {
4014 struct task_struct *task;
4015 struct cgroup_subsys_state *css;
4016
4017 cgroup_taskset_for_each(task, css, tset) {
4018 /* atomically set the update bit */
4019 set_bit(CURRENT_OBJCG_UPDATE_BIT, (unsigned long *)&task->objcg);
4020 }
4021 }
4022
mem_cgroup_attach(struct cgroup_taskset * tset)4023 static void mem_cgroup_attach(struct cgroup_taskset *tset)
4024 {
4025 mem_cgroup_lru_gen_attach(tset);
4026 mem_cgroup_kmem_attach(tset);
4027 }
4028
seq_puts_memcg_tunable(struct seq_file * m,unsigned long value)4029 static int seq_puts_memcg_tunable(struct seq_file *m, unsigned long value)
4030 {
4031 if (value == PAGE_COUNTER_MAX)
4032 seq_puts(m, "max\n");
4033 else
4034 seq_printf(m, "%llu\n", (u64)value * PAGE_SIZE);
4035
4036 return 0;
4037 }
4038
memory_current_read(struct cgroup_subsys_state * css,struct cftype * cft)4039 static u64 memory_current_read(struct cgroup_subsys_state *css,
4040 struct cftype *cft)
4041 {
4042 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4043
4044 return (u64)page_counter_read(&memcg->memory) * PAGE_SIZE;
4045 }
4046
4047 #define OFP_PEAK_UNSET (((-1UL)))
4048
peak_show(struct seq_file * sf,void * v,struct page_counter * pc)4049 static int peak_show(struct seq_file *sf, void *v, struct page_counter *pc)
4050 {
4051 struct cgroup_of_peak *ofp = of_peak(sf->private);
4052 u64 fd_peak = READ_ONCE(ofp->value), peak;
4053
4054 /* User wants global or local peak? */
4055 if (fd_peak == OFP_PEAK_UNSET)
4056 peak = pc->watermark;
4057 else
4058 peak = max(fd_peak, READ_ONCE(pc->local_watermark));
4059
4060 seq_printf(sf, "%llu\n", peak * PAGE_SIZE);
4061 return 0;
4062 }
4063
memory_peak_show(struct seq_file * sf,void * v)4064 static int memory_peak_show(struct seq_file *sf, void *v)
4065 {
4066 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(sf));
4067
4068 return peak_show(sf, v, &memcg->memory);
4069 }
4070
peak_open(struct kernfs_open_file * of)4071 static int peak_open(struct kernfs_open_file *of)
4072 {
4073 struct cgroup_of_peak *ofp = of_peak(of);
4074
4075 ofp->value = OFP_PEAK_UNSET;
4076 return 0;
4077 }
4078
peak_release(struct kernfs_open_file * of)4079 static void peak_release(struct kernfs_open_file *of)
4080 {
4081 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4082 struct cgroup_of_peak *ofp = of_peak(of);
4083
4084 if (ofp->value == OFP_PEAK_UNSET) {
4085 /* fast path (no writes on this fd) */
4086 return;
4087 }
4088 spin_lock(&memcg->peaks_lock);
4089 list_del(&ofp->list);
4090 spin_unlock(&memcg->peaks_lock);
4091 }
4092
peak_write(struct kernfs_open_file * of,char * buf,size_t nbytes,loff_t off,struct page_counter * pc,struct list_head * watchers)4093 static ssize_t peak_write(struct kernfs_open_file *of, char *buf, size_t nbytes,
4094 loff_t off, struct page_counter *pc,
4095 struct list_head *watchers)
4096 {
4097 unsigned long usage;
4098 struct cgroup_of_peak *peer_ctx;
4099 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4100 struct cgroup_of_peak *ofp = of_peak(of);
4101
4102 spin_lock(&memcg->peaks_lock);
4103
4104 usage = page_counter_read(pc);
4105 WRITE_ONCE(pc->local_watermark, usage);
4106
4107 list_for_each_entry(peer_ctx, watchers, list)
4108 if (usage > peer_ctx->value)
4109 WRITE_ONCE(peer_ctx->value, usage);
4110
4111 /* initial write, register watcher */
4112 if (ofp->value == OFP_PEAK_UNSET)
4113 list_add(&ofp->list, watchers);
4114
4115 WRITE_ONCE(ofp->value, usage);
4116 spin_unlock(&memcg->peaks_lock);
4117
4118 return nbytes;
4119 }
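/*
 * Userspace view of the fd-local peak protocol implemented by
 * peak_open/peak_write/peak_show (a sketch; the cgroup path is only an
 * example): writing to memory.peak rearms this fd at the current usage,
 * so later reads on the same fd report the maximum usage seen since
 * that write, while an fd that never wrote keeps reporting the
 * all-time watermark:
 *
 *	char buf[32];
 *	int fd = open("/sys/fs/cgroup/foo/memory.peak", O_RDWR);
 *
 *	write(fd, "reset\n", 6);	// register as watcher and rearm
 *	... run the workload ...
 *	lseek(fd, 0, SEEK_SET);
 *	read(fd, buf, sizeof(buf));	// peak since the write, in bytes
 */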
4120
memory_peak_write(struct kernfs_open_file * of,char * buf,size_t nbytes,loff_t off)4121 static ssize_t memory_peak_write(struct kernfs_open_file *of, char *buf,
4122 size_t nbytes, loff_t off)
4123 {
4124 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4125
4126 return peak_write(of, buf, nbytes, off, &memcg->memory,
4127 &memcg->memory_peaks);
4128 }
4129
4130 #undef OFP_PEAK_UNSET
4131
memory_min_show(struct seq_file * m,void * v)4132 static int memory_min_show(struct seq_file *m, void *v)
4133 {
4134 return seq_puts_memcg_tunable(m,
4135 READ_ONCE(mem_cgroup_from_seq(m)->memory.min));
4136 }
4137
memory_min_write(struct kernfs_open_file * of,char * buf,size_t nbytes,loff_t off)4138 static ssize_t memory_min_write(struct kernfs_open_file *of,
4139 char *buf, size_t nbytes, loff_t off)
4140 {
4141 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4142 unsigned long min;
4143 int err;
4144
4145 buf = strstrip(buf);
4146 err = page_counter_memparse(buf, "max", &min);
4147 if (err)
4148 return err;
4149
4150 page_counter_set_min(&memcg->memory, min);
4151
4152 return nbytes;
4153 }
4154
memory_low_show(struct seq_file * m,void * v)4155 static int memory_low_show(struct seq_file *m, void *v)
4156 {
4157 return seq_puts_memcg_tunable(m,
4158 READ_ONCE(mem_cgroup_from_seq(m)->memory.low));
4159 }
4160
memory_low_write(struct kernfs_open_file * of,char * buf,size_t nbytes,loff_t off)4161 static ssize_t memory_low_write(struct kernfs_open_file *of,
4162 char *buf, size_t nbytes, loff_t off)
4163 {
4164 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4165 unsigned long low;
4166 int err;
4167
4168 buf = strstrip(buf);
4169 err = page_counter_memparse(buf, "max", &low);
4170 if (err)
4171 return err;
4172
4173 page_counter_set_low(&memcg->memory, low);
4174
4175 return nbytes;
4176 }
4177
memory_high_show(struct seq_file * m,void * v)4178 static int memory_high_show(struct seq_file *m, void *v)
4179 {
4180 return seq_puts_memcg_tunable(m,
4181 READ_ONCE(mem_cgroup_from_seq(m)->memory.high));
4182 }
4183
memory_high_write(struct kernfs_open_file * of,char * buf,size_t nbytes,loff_t off)4184 static ssize_t memory_high_write(struct kernfs_open_file *of,
4185 char *buf, size_t nbytes, loff_t off)
4186 {
4187 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4188 unsigned int nr_retries = MAX_RECLAIM_RETRIES;
4189 bool drained = false;
4190 unsigned long high;
4191 int err;
4192
4193 buf = strstrip(buf);
4194 err = page_counter_memparse(buf, "max", &high);
4195 if (err)
4196 return err;
4197
4198 page_counter_set_high(&memcg->memory, high);
4199
4200 for (;;) {
4201 unsigned long nr_pages = page_counter_read(&memcg->memory);
4202 unsigned long reclaimed;
4203
4204 if (nr_pages <= high)
4205 break;
4206
4207 if (signal_pending(current))
4208 break;
4209
4210 if (!drained) {
4211 drain_all_stock(memcg);
4212 drained = true;
4213 continue;
4214 }
4215
4216 reclaimed = try_to_free_mem_cgroup_pages(memcg, nr_pages - high,
4217 GFP_KERNEL, MEMCG_RECLAIM_MAY_SWAP, NULL);
4218
4219 if (!reclaimed && !nr_retries--)
4220 break;
4221 }
4222
4223 memcg_wb_domain_size_changed(memcg);
4224 return nbytes;
4225 }
4226
memory_max_show(struct seq_file * m,void * v)4227 static int memory_max_show(struct seq_file *m, void *v)
4228 {
4229 return seq_puts_memcg_tunable(m,
4230 READ_ONCE(mem_cgroup_from_seq(m)->memory.max));
4231 }
4232
memory_max_write(struct kernfs_open_file * of,char * buf,size_t nbytes,loff_t off)4233 static ssize_t memory_max_write(struct kernfs_open_file *of,
4234 char *buf, size_t nbytes, loff_t off)
4235 {
4236 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4237 unsigned int nr_reclaims = MAX_RECLAIM_RETRIES;
4238 bool drained = false;
4239 unsigned long max;
4240 int err;
4241
4242 buf = strstrip(buf);
4243 err = page_counter_memparse(buf, "max", &max);
4244 if (err)
4245 return err;
4246
4247 xchg(&memcg->memory.max, max);
4248
4249 for (;;) {
4250 unsigned long nr_pages = page_counter_read(&memcg->memory);
4251
4252 if (nr_pages <= max)
4253 break;
4254
4255 if (signal_pending(current))
4256 break;
4257
4258 if (!drained) {
4259 drain_all_stock(memcg);
4260 drained = true;
4261 continue;
4262 }
4263
4264 if (nr_reclaims) {
4265 if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max,
4266 GFP_KERNEL, MEMCG_RECLAIM_MAY_SWAP, NULL))
4267 nr_reclaims--;
4268 continue;
4269 }
4270
4271 memcg_memory_event(memcg, MEMCG_OOM);
4272 if (!mem_cgroup_out_of_memory(memcg, GFP_KERNEL, 0))
4273 break;
4274 cond_resched();
4275 }
4276
4277 memcg_wb_domain_size_changed(memcg);
4278 return nbytes;
4279 }
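/*
 * Sketch of the resulting behaviour from userspace (path and size are
 * only examples): lowering the limit triggers the loop above, which
 * drains the percpu stocks, reclaims, and finally OOM-kills within the
 * group if usage still cannot be pushed below the new limit:
 *
 *	int fd = open("/sys/fs/cgroup/foo/memory.max", O_WRONLY);
 *
 *	dprintf(fd, "%llu\n", 512ULL << 20);	// 512M, in bytes
 *	close(fd);
 */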
4280
4281 /*
4282 * Note: don't forget to update the 'samples/cgroup/memcg_event_listener'
4283 * if any new events become available.
4284 */
__memory_events_show(struct seq_file * m,atomic_long_t * events)4285 static void __memory_events_show(struct seq_file *m, atomic_long_t *events)
4286 {
4287 seq_printf(m, "low %lu\n", atomic_long_read(&events[MEMCG_LOW]));
4288 seq_printf(m, "high %lu\n", atomic_long_read(&events[MEMCG_HIGH]));
4289 seq_printf(m, "max %lu\n", atomic_long_read(&events[MEMCG_MAX]));
4290 seq_printf(m, "oom %lu\n", atomic_long_read(&events[MEMCG_OOM]));
4291 seq_printf(m, "oom_kill %lu\n",
4292 atomic_long_read(&events[MEMCG_OOM_KILL]));
4293 seq_printf(m, "oom_group_kill %lu\n",
4294 atomic_long_read(&events[MEMCG_OOM_GROUP_KILL]));
4295 }
4296
memory_events_show(struct seq_file * m,void * v)4297 static int memory_events_show(struct seq_file *m, void *v)
4298 {
4299 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
4300
4301 __memory_events_show(m, memcg->memory_events);
4302 return 0;
4303 }
4304
memory_events_local_show(struct seq_file * m,void * v)4305 static int memory_events_local_show(struct seq_file *m, void *v)
4306 {
4307 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
4308
4309 __memory_events_show(m, memcg->memory_events_local);
4310 return 0;
4311 }
4312
memory_stat_show(struct seq_file * m,void * v)4313 int memory_stat_show(struct seq_file *m, void *v)
4314 {
4315 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
4316 char *buf = kmalloc(SEQ_BUF_SIZE, GFP_KERNEL);
4317 struct seq_buf s;
4318
4319 if (!buf)
4320 return -ENOMEM;
4321 seq_buf_init(&s, buf, SEQ_BUF_SIZE);
4322 memory_stat_format(memcg, &s);
4323 seq_puts(m, buf);
4324 kfree(buf);
4325 return 0;
4326 }
4327
4328 #ifdef CONFIG_NUMA
lruvec_page_state_output(struct lruvec * lruvec,int item)4329 static inline unsigned long lruvec_page_state_output(struct lruvec *lruvec,
4330 int item)
4331 {
4332 return lruvec_page_state(lruvec, item) *
4333 memcg_page_state_output_unit(item);
4334 }
4335
memory_numa_stat_show(struct seq_file * m,void * v)4336 static int memory_numa_stat_show(struct seq_file *m, void *v)
4337 {
4338 int i;
4339 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
4340
4341 mem_cgroup_flush_stats(memcg);
4342
4343 for (i = 0; i < ARRAY_SIZE(memory_stats); i++) {
4344 int nid;
4345
4346 if (memory_stats[i].idx >= NR_VM_NODE_STAT_ITEMS)
4347 continue;
4348
4349 seq_printf(m, "%s", memory_stats[i].name);
4350 for_each_node_state(nid, N_MEMORY) {
4351 u64 size;
4352 struct lruvec *lruvec;
4353
4354 lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
4355 size = lruvec_page_state_output(lruvec,
4356 memory_stats[i].idx);
4357 seq_printf(m, " N%d=%llu", nid, size);
4358 }
4359 seq_putc(m, '\n');
4360 }
4361
4362 return 0;
4363 }
4364 #endif
4365
memory_oom_group_show(struct seq_file * m,void * v)4366 static int memory_oom_group_show(struct seq_file *m, void *v)
4367 {
4368 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
4369
4370 seq_printf(m, "%d\n", READ_ONCE(memcg->oom_group));
4371
4372 return 0;
4373 }
4374
memory_oom_group_write(struct kernfs_open_file * of,char * buf,size_t nbytes,loff_t off)4375 static ssize_t memory_oom_group_write(struct kernfs_open_file *of,
4376 char *buf, size_t nbytes, loff_t off)
4377 {
4378 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4379 int ret, oom_group;
4380
4381 buf = strstrip(buf);
4382 if (!buf)
4383 return -EINVAL;
4384
4385 ret = kstrtoint(buf, 0, &oom_group);
4386 if (ret)
4387 return ret;
4388
4389 if (oom_group != 0 && oom_group != 1)
4390 return -EINVAL;
4391
4392 WRITE_ONCE(memcg->oom_group, oom_group);
4393
4394 return nbytes;
4395 }
4396
4397 enum {
4398 MEMORY_RECLAIM_SWAPPINESS = 0,
4399 MEMORY_RECLAIM_NULL,
4400 };
4401
4402 static const match_table_t tokens = {
4403 { MEMORY_RECLAIM_SWAPPINESS, "swappiness=%d"},
4404 { MEMORY_RECLAIM_NULL, NULL },
4405 };
4406
memory_reclaim(struct kernfs_open_file * of,char * buf,size_t nbytes,loff_t off)4407 static ssize_t memory_reclaim(struct kernfs_open_file *of, char *buf,
4408 size_t nbytes, loff_t off)
4409 {
4410 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4411 unsigned int nr_retries = MAX_RECLAIM_RETRIES;
4412 unsigned long nr_to_reclaim, nr_reclaimed = 0;
4413 int swappiness = -1;
4414 unsigned int reclaim_options;
4415 char *old_buf, *start;
4416 substring_t args[MAX_OPT_ARGS];
4417
4418 buf = strstrip(buf);
4419
4420 old_buf = buf;
4421 nr_to_reclaim = memparse(buf, &buf) / PAGE_SIZE;
4422 if (buf == old_buf)
4423 return -EINVAL;
4424
4425 buf = strstrip(buf);
4426
4427 while ((start = strsep(&buf, " ")) != NULL) {
4428 if (!strlen(start))
4429 continue;
4430 switch (match_token(start, tokens, args)) {
4431 case MEMORY_RECLAIM_SWAPPINESS:
4432 if (match_int(&args[0], &swappiness))
4433 return -EINVAL;
4434 if (swappiness < MIN_SWAPPINESS || swappiness > MAX_SWAPPINESS)
4435 return -EINVAL;
4436 break;
4437 default:
4438 return -EINVAL;
4439 }
4440 }
4441
4442 reclaim_options = MEMCG_RECLAIM_MAY_SWAP | MEMCG_RECLAIM_PROACTIVE;
4443 while (nr_reclaimed < nr_to_reclaim) {
4444 /* Will converge on zero, but reclaim enforces a minimum */
4445 unsigned long batch_size = (nr_to_reclaim - nr_reclaimed) / 4;
4446 unsigned long reclaimed;
4447
4448 if (signal_pending(current))
4449 return -EINTR;
4450
4451 /*
4452 * On the final attempt, drain the percpu lru caches in the
4453 * hope of introducing more evictable pages for
4454 * try_to_free_mem_cgroup_pages().
4455 */
4456 if (!nr_retries)
4457 lru_add_drain_all();
4458
4459 reclaimed = try_to_free_mem_cgroup_pages(memcg,
4460 batch_size, GFP_KERNEL,
4461 reclaim_options,
4462 swappiness == -1 ? NULL : &swappiness);
4463
4464 if (!reclaimed && !nr_retries--)
4465 return -EAGAIN;
4466
4467 nr_reclaimed += reclaimed;
4468 }
4469
4470 return nbytes;
4471 }
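/*
 * Example of the interface parsed above (values are illustrative): the
 * first token is a byte amount for memparse(), optionally followed by
 * "swappiness=<n>" with n in [MIN_SWAPPINESS, MAX_SWAPPINESS]:
 *
 *	int fd = open("/sys/fs/cgroup/foo/memory.reclaim", O_WRONLY);
 *
 *	dprintf(fd, "1G swappiness=60\n");
 *	close(fd);
 *
 * The write returns once the requested amount has been reclaimed, or
 * fails with -EAGAIN when reclaim stops making progress (or -EINTR on
 * a pending signal).
 */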
4472
4473 static struct cftype memory_files[] = {
4474 {
4475 .name = "current",
4476 .flags = CFTYPE_NOT_ON_ROOT,
4477 .read_u64 = memory_current_read,
4478 },
4479 {
4480 .name = "peak",
4481 .flags = CFTYPE_NOT_ON_ROOT,
4482 .open = peak_open,
4483 .release = peak_release,
4484 .seq_show = memory_peak_show,
4485 .write = memory_peak_write,
4486 },
4487 {
4488 .name = "min",
4489 .flags = CFTYPE_NOT_ON_ROOT,
4490 .seq_show = memory_min_show,
4491 .write = memory_min_write,
4492 },
4493 {
4494 .name = "low",
4495 .flags = CFTYPE_NOT_ON_ROOT,
4496 .seq_show = memory_low_show,
4497 .write = memory_low_write,
4498 },
4499 {
4500 .name = "high",
4501 .flags = CFTYPE_NOT_ON_ROOT,
4502 .seq_show = memory_high_show,
4503 .write = memory_high_write,
4504 },
4505 {
4506 .name = "max",
4507 .flags = CFTYPE_NOT_ON_ROOT,
4508 .seq_show = memory_max_show,
4509 .write = memory_max_write,
4510 },
4511 {
4512 .name = "events",
4513 .flags = CFTYPE_NOT_ON_ROOT,
4514 .file_offset = offsetof(struct mem_cgroup, events_file),
4515 .seq_show = memory_events_show,
4516 },
4517 {
4518 .name = "events.local",
4519 .flags = CFTYPE_NOT_ON_ROOT,
4520 .file_offset = offsetof(struct mem_cgroup, events_local_file),
4521 .seq_show = memory_events_local_show,
4522 },
4523 {
4524 .name = "stat",
4525 .seq_show = memory_stat_show,
4526 },
4527 #ifdef CONFIG_NUMA
4528 {
4529 .name = "numa_stat",
4530 .seq_show = memory_numa_stat_show,
4531 },
4532 #endif
4533 {
4534 .name = "oom.group",
4535 .flags = CFTYPE_NOT_ON_ROOT | CFTYPE_NS_DELEGATABLE,
4536 .seq_show = memory_oom_group_show,
4537 .write = memory_oom_group_write,
4538 },
4539 {
4540 .name = "reclaim",
4541 .flags = CFTYPE_NS_DELEGATABLE,
4542 .write = memory_reclaim,
4543 },
4544 { } /* terminate */
4545 };
4546
4547 struct cgroup_subsys memory_cgrp_subsys = {
4548 .css_alloc = mem_cgroup_css_alloc,
4549 .css_online = mem_cgroup_css_online,
4550 .css_offline = mem_cgroup_css_offline,
4551 .css_released = mem_cgroup_css_released,
4552 .css_free = mem_cgroup_css_free,
4553 .css_reset = mem_cgroup_css_reset,
4554 .css_rstat_flush = mem_cgroup_css_rstat_flush,
4555 .attach = mem_cgroup_attach,
4556 .fork = mem_cgroup_fork,
4557 .exit = mem_cgroup_exit,
4558 .dfl_cftypes = memory_files,
4559 #ifdef CONFIG_MEMCG_V1
4560 .legacy_cftypes = mem_cgroup_legacy_files,
4561 #endif
4562 .early_init = 0,
4563 };
4564
4565 /**
4566 * mem_cgroup_calculate_protection - check if memory consumption is in the normal range
4567 * @root: the top ancestor of the sub-tree being checked
4568 * @memcg: the memory cgroup to check
4569 *
4570 * WARNING: This function is not stateless! It can only be used as part
4571 * of a top-down tree iteration, not for isolated queries.
4572 */
mem_cgroup_calculate_protection(struct mem_cgroup * root,struct mem_cgroup * memcg)4573 void mem_cgroup_calculate_protection(struct mem_cgroup *root,
4574 struct mem_cgroup *memcg)
4575 {
4576 bool recursive_protection =
4577 cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_RECURSIVE_PROT;
4578
4579 if (mem_cgroup_disabled())
4580 return;
4581
4582 if (!root)
4583 root = root_mem_cgroup;
4584
4585 page_counter_calculate_protection(&root->memory, &memcg->memory, recursive_protection);
4586 }
4587
charge_memcg(struct folio * folio,struct mem_cgroup * memcg,gfp_t gfp)4588 static int charge_memcg(struct folio *folio, struct mem_cgroup *memcg,
4589 gfp_t gfp)
4590 {
4591 int ret;
4592
4593 ret = try_charge(memcg, gfp, folio_nr_pages(folio));
4594 if (ret)
4595 goto out;
4596
4597 css_get(&memcg->css);
4598 commit_charge(folio, memcg);
4599 memcg1_commit_charge(folio, memcg);
4600 out:
4601 return ret;
4602 }
4603
__mem_cgroup_charge(struct folio * folio,struct mm_struct * mm,gfp_t gfp)4604 int __mem_cgroup_charge(struct folio *folio, struct mm_struct *mm, gfp_t gfp)
4605 {
4606 struct mem_cgroup *memcg;
4607 int ret;
4608
4609 memcg = get_mem_cgroup_from_mm(mm);
4610 ret = charge_memcg(folio, memcg, gfp);
4611 css_put(&memcg->css);
4612
4613 return ret;
4614 }
4615
4616 /**
4617 * mem_cgroup_charge_hugetlb - charge the memcg for a hugetlb folio
4618 * @folio: folio being charged
4619 * @gfp: reclaim mode
4620 *
4621 * This function is called when allocating a huge page folio, after the page has
4622 * already been obtained and charged to the appropriate hugetlb cgroup
4623 * controller (if it is enabled).
4624 *
4625 * Returns -ENOMEM if the memcg is already full.
4626 * Returns 0 if either the charge was successful, or if we skip the charging.
4627 */
mem_cgroup_charge_hugetlb(struct folio * folio,gfp_t gfp)4628 int mem_cgroup_charge_hugetlb(struct folio *folio, gfp_t gfp)
4629 {
4630 struct mem_cgroup *memcg = get_mem_cgroup_from_current();
4631 int ret = 0;
4632
4633 /*
4634 * Even if memcg does not account for hugetlb, we still want to update
4635 * system-level stats via lruvec_stat_mod_folio. Return 0, and skip
4636 * charging the memcg.
4637 */
4638 if (mem_cgroup_disabled() || !memcg_accounts_hugetlb() ||
4639 !memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
4640 goto out;
4641
4642 if (charge_memcg(folio, memcg, gfp))
4643 ret = -ENOMEM;
4644
4645 out:
4646 mem_cgroup_put(memcg);
4647 return ret;
4648 }
4649
4650 /**
4651 * mem_cgroup_swapin_charge_folio - Charge a newly allocated folio for swapin.
4652 * @folio: folio to charge.
4653 * @mm: mm context of the victim
4654 * @gfp: reclaim mode
4655 * @entry: swap entry for which the folio is allocated
4656 *
4657 * This function charges a folio allocated for swapin. Please call this before
4658 * adding the folio to the swapcache.
4659 *
4660 * Returns 0 on success. Otherwise, an error code is returned.
4661 */
mem_cgroup_swapin_charge_folio(struct folio * folio,struct mm_struct * mm,gfp_t gfp,swp_entry_t entry)4662 int mem_cgroup_swapin_charge_folio(struct folio *folio, struct mm_struct *mm,
4663 gfp_t gfp, swp_entry_t entry)
4664 {
4665 struct mem_cgroup *memcg;
4666 unsigned short id;
4667 int ret;
4668
4669 if (mem_cgroup_disabled())
4670 return 0;
4671
4672 id = lookup_swap_cgroup_id(entry);
4673 rcu_read_lock();
4674 memcg = mem_cgroup_from_id(id);
4675 if (!memcg || !css_tryget_online(&memcg->css))
4676 memcg = get_mem_cgroup_from_mm(mm);
4677 rcu_read_unlock();
4678
4679 ret = charge_memcg(folio, memcg, gfp);
4680
4681 css_put(&memcg->css);
4682 return ret;
4683 }
4684
4685 struct uncharge_gather {
4686 struct mem_cgroup *memcg;
4687 unsigned long nr_memory;
4688 unsigned long pgpgout;
4689 unsigned long nr_kmem;
4690 int nid;
4691 };
4692
uncharge_gather_clear(struct uncharge_gather * ug)4693 static inline void uncharge_gather_clear(struct uncharge_gather *ug)
4694 {
4695 memset(ug, 0, sizeof(*ug));
4696 }
4697
uncharge_batch(const struct uncharge_gather * ug)4698 static void uncharge_batch(const struct uncharge_gather *ug)
4699 {
4700 if (ug->nr_memory) {
4701 page_counter_uncharge(&ug->memcg->memory, ug->nr_memory);
4702 if (do_memsw_account())
4703 page_counter_uncharge(&ug->memcg->memsw, ug->nr_memory);
4704 if (ug->nr_kmem) {
4705 mod_memcg_state(ug->memcg, MEMCG_KMEM, -ug->nr_kmem);
4706 memcg1_account_kmem(ug->memcg, -ug->nr_kmem);
4707 }
4708 memcg1_oom_recover(ug->memcg);
4709 }
4710
4711 memcg1_uncharge_batch(ug->memcg, ug->pgpgout, ug->nr_memory, ug->nid);
4712
4713 /* drop reference from uncharge_folio */
4714 css_put(&ug->memcg->css);
4715 }
4716
uncharge_folio(struct folio * folio,struct uncharge_gather * ug)4717 static void uncharge_folio(struct folio *folio, struct uncharge_gather *ug)
4718 {
4719 long nr_pages;
4720 struct mem_cgroup *memcg;
4721 struct obj_cgroup *objcg;
4722
4723 VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
4724
4725 /*
4726 * Nobody should be changing or seriously looking at
4727 * folio memcg or objcg at this point; we have fully
4728 * exclusive access to the folio.
4729 */
4730 if (folio_memcg_kmem(folio)) {
4731 objcg = __folio_objcg(folio);
4732 /*
4733 * This get matches the put at the end of the function and
4734 * kmem pages do not hold memcg references anymore.
4735 */
4736 memcg = get_mem_cgroup_from_objcg(objcg);
4737 } else {
4738 memcg = __folio_memcg(folio);
4739 }
4740
4741 if (!memcg)
4742 return;
4743
4744 if (ug->memcg != memcg) {
4745 if (ug->memcg) {
4746 uncharge_batch(ug);
4747 uncharge_gather_clear(ug);
4748 }
4749 ug->memcg = memcg;
4750 ug->nid = folio_nid(folio);
4751
4752 /* pairs with css_put in uncharge_batch */
4753 css_get(&memcg->css);
4754 }
4755
4756 nr_pages = folio_nr_pages(folio);
4757
4758 if (folio_memcg_kmem(folio)) {
4759 ug->nr_memory += nr_pages;
4760 ug->nr_kmem += nr_pages;
4761
4762 folio->memcg_data = 0;
4763 obj_cgroup_put(objcg);
4764 } else {
4765 /* LRU pages aren't accounted at the root level */
4766 if (!mem_cgroup_is_root(memcg))
4767 ug->nr_memory += nr_pages;
4768 ug->pgpgout++;
4769
4770 WARN_ON_ONCE(folio_unqueue_deferred_split(folio));
4771 folio->memcg_data = 0;
4772 }
4773
4774 css_put(&memcg->css);
4775 }
4776
__mem_cgroup_uncharge(struct folio * folio)4777 void __mem_cgroup_uncharge(struct folio *folio)
4778 {
4779 struct uncharge_gather ug;
4780
4781 /* Don't touch folio->lru of any random page, pre-check: */
4782 if (!folio_memcg_charged(folio))
4783 return;
4784
4785 uncharge_gather_clear(&ug);
4786 uncharge_folio(folio, &ug);
4787 uncharge_batch(&ug);
4788 }
4789
__mem_cgroup_uncharge_folios(struct folio_batch * folios)4790 void __mem_cgroup_uncharge_folios(struct folio_batch *folios)
4791 {
4792 struct uncharge_gather ug;
4793 unsigned int i;
4794
4795 uncharge_gather_clear(&ug);
4796 for (i = 0; i < folios->nr; i++)
4797 uncharge_folio(folios->folios[i], &ug);
4798 if (ug.memcg)
4799 uncharge_batch(&ug);
4800 }
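/*
 * Example of the batching above (illustrative): for a folio_batch whose
 * folios belong to memcgs A, A, B, A, the gather is flushed three
 * times: once for the first two A folios, once for the B folio, and
 * once for the trailing A folio. The page counter updates and memcg1
 * event accounting are thus issued per run of same-memcg folios rather
 * than per folio.
 */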
4801
4802 /**
4803 * mem_cgroup_replace_folio - Charge a folio's replacement.
4804 * @old: Currently circulating folio.
4805 * @new: Replacement folio.
4806 *
4807 * Charge @new as a replacement folio for @old. @old will
4808 * be uncharged upon free.
4809 *
4810 * Both folios must be locked, @new->mapping must be set up.
4811 */
mem_cgroup_replace_folio(struct folio * old,struct folio * new)4812 void mem_cgroup_replace_folio(struct folio *old, struct folio *new)
4813 {
4814 struct mem_cgroup *memcg;
4815 long nr_pages = folio_nr_pages(new);
4816
4817 VM_BUG_ON_FOLIO(!folio_test_locked(old), old);
4818 VM_BUG_ON_FOLIO(!folio_test_locked(new), new);
4819 VM_BUG_ON_FOLIO(folio_test_anon(old) != folio_test_anon(new), new);
4820 VM_BUG_ON_FOLIO(folio_nr_pages(old) != nr_pages, new);
4821
4822 if (mem_cgroup_disabled())
4823 return;
4824
4825 /* Page cache replacement: new folio already charged? */
4826 if (folio_memcg_charged(new))
4827 return;
4828
4829 memcg = folio_memcg(old);
4830 VM_WARN_ON_ONCE_FOLIO(!memcg, old);
4831 if (!memcg)
4832 return;
4833
4834 /* Force-charge the new page. The old one will be freed soon */
4835 if (!mem_cgroup_is_root(memcg)) {
4836 page_counter_charge(&memcg->memory, nr_pages);
4837 if (do_memsw_account())
4838 page_counter_charge(&memcg->memsw, nr_pages);
4839 }
4840
4841 css_get(&memcg->css);
4842 commit_charge(new, memcg);
4843 memcg1_commit_charge(new, memcg);
4844 }
4845
4846 /**
4847 * mem_cgroup_migrate - Transfer the memcg data from the old to the new folio.
4848 * @old: Currently circulating folio.
4849 * @new: Replacement folio.
4850 *
4851 * Transfer the memcg data from the old folio to the new folio for migration.
4852 * The old folio's data info will be cleared. Note that the memory counters
4853 * will remain unchanged throughout the process.
4854 *
4855 * Both folios must be locked, @new->mapping must be set up.
4856 */
mem_cgroup_migrate(struct folio * old,struct folio * new)4857 void mem_cgroup_migrate(struct folio *old, struct folio *new)
4858 {
4859 struct mem_cgroup *memcg;
4860
4861 VM_BUG_ON_FOLIO(!folio_test_locked(old), old);
4862 VM_BUG_ON_FOLIO(!folio_test_locked(new), new);
4863 VM_BUG_ON_FOLIO(folio_test_anon(old) != folio_test_anon(new), new);
4864 VM_BUG_ON_FOLIO(folio_nr_pages(old) != folio_nr_pages(new), new);
4865 VM_BUG_ON_FOLIO(folio_test_lru(old), old);
4866
4867 if (mem_cgroup_disabled())
4868 return;
4869
4870 memcg = folio_memcg(old);
4871 /*
4872 * Note that it is normal to see !memcg for a hugetlb folio.
4873 * For example, it could have been allocated when memory_hugetlb_accounting
4874 * was not selected.
4875 */
4876 VM_WARN_ON_ONCE_FOLIO(!folio_test_hugetlb(old) && !memcg, old);
4877 if (!memcg)
4878 return;
4879
4880 /* Transfer the charge and the css ref */
4881 commit_charge(new, memcg);
4882
4883 /* Warning should never happen, so don't worry about refcount non-0 */
4884 WARN_ON_ONCE(folio_unqueue_deferred_split(old));
4885 old->memcg_data = 0;
4886 }
4887
4888 DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
4889 EXPORT_SYMBOL(memcg_sockets_enabled_key);
4890
mem_cgroup_sk_alloc(struct sock * sk)4891 void mem_cgroup_sk_alloc(struct sock *sk)
4892 {
4893 struct mem_cgroup *memcg;
4894
4895 if (!mem_cgroup_sockets_enabled)
4896 return;
4897
4898 /* Do not associate the sock with an unrelated interrupted task's memcg. */
4899 if (!in_task())
4900 return;
4901
4902 rcu_read_lock();
4903 memcg = mem_cgroup_from_task(current);
4904 if (mem_cgroup_is_root(memcg))
4905 goto out;
4906 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg1_tcpmem_active(memcg))
4907 goto out;
4908 if (css_tryget(&memcg->css))
4909 sk->sk_memcg = memcg;
4910 out:
4911 rcu_read_unlock();
4912 }
4913
mem_cgroup_sk_free(struct sock * sk)4914 void mem_cgroup_sk_free(struct sock *sk)
4915 {
4916 if (sk->sk_memcg)
4917 css_put(&sk->sk_memcg->css);
4918 }
4919
4920 /**
4921 * mem_cgroup_charge_skmem - charge socket memory
4922 * @memcg: memcg to charge
4923 * @nr_pages: number of pages to charge
4924 * @gfp_mask: reclaim mode
4925 *
4926 * Charges @nr_pages to @memcg. Returns %true if the charge fit within
4927 * @memcg's configured limit, %false if it doesn't.
4928 */
mem_cgroup_charge_skmem(struct mem_cgroup * memcg,unsigned int nr_pages,gfp_t gfp_mask)4929 bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages,
4930 gfp_t gfp_mask)
4931 {
4932 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
4933 return memcg1_charge_skmem(memcg, nr_pages, gfp_mask);
4934
4935 if (try_charge_memcg(memcg, gfp_mask, nr_pages) == 0) {
4936 mod_memcg_state(memcg, MEMCG_SOCK, nr_pages);
4937 return true;
4938 }
4939
4940 return false;
4941 }
4942
4943 /**
4944 * mem_cgroup_uncharge_skmem - uncharge socket memory
4945 * @memcg: memcg to uncharge
4946 * @nr_pages: number of pages to uncharge
4947 */
mem_cgroup_uncharge_skmem(struct mem_cgroup * memcg,unsigned int nr_pages)4948 void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
4949 {
4950 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
4951 memcg1_uncharge_skmem(memcg, nr_pages);
4952 return;
4953 }
4954
4955 mod_memcg_state(memcg, MEMCG_SOCK, -nr_pages);
4956
4957 refill_stock(memcg, nr_pages);
4958 }
4959
cgroup_memory(char * s)4960 static int __init cgroup_memory(char *s)
4961 {
4962 char *token;
4963
4964 while ((token = strsep(&s, ",")) != NULL) {
4965 if (!*token)
4966 continue;
4967 if (!strcmp(token, "nosocket"))
4968 cgroup_memory_nosocket = true;
4969 if (!strcmp(token, "nokmem"))
4970 cgroup_memory_nokmem = true;
4971 if (!strcmp(token, "nobpf"))
4972 cgroup_memory_nobpf = true;
4973 }
4974 return 1;
4975 }
4976 __setup("cgroup.memory=", cgroup_memory);
4977
4978 /*
4979 * subsys_initcall() for memory controller.
4980 *
4981 * Some parts like memcg_hotplug_cpu_dead() have to be initialized from this
4982 * context because of lock dependencies (cgroup_lock -> cpu hotplug) but
4983 * basically everything that doesn't depend on a specific mem_cgroup structure
4984 * should be initialized from here.
4985 */
mem_cgroup_init(void)4986 static int __init mem_cgroup_init(void)
4987 {
4988 int cpu;
4989
4990 /*
4991 * Currently an s32 type (see struct batched_lruvec_stat) is
4992 * used for per-memcg-per-cpu caching of per-node statistics. For
4993 * this to work correctly, the overfill threshold must not
4994 * exceed S32_MAX / PAGE_SIZE.
4995 */
4996 BUILD_BUG_ON(MEMCG_CHARGE_BATCH > S32_MAX / PAGE_SIZE);
4997
4998 cpuhp_setup_state_nocalls(CPUHP_MM_MEMCQ_DEAD, "mm/memctrl:dead", NULL,
4999 memcg_hotplug_cpu_dead);
5000
5001 for_each_possible_cpu(cpu)
5002 INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work,
5003 drain_local_stock);
5004
5005 return 0;
5006 }
5007 subsys_initcall(mem_cgroup_init);
5008
5009 #ifdef CONFIG_SWAP
5010 /**
5011 * __mem_cgroup_try_charge_swap - try charging swap space for a folio
5012 * @folio: folio being added to swap
5013 * @entry: swap entry to charge
5014 *
5015 * Try to charge @folio's memcg for the swap space at @entry.
5016 *
5017 * Returns 0 on success, -ENOMEM on failure.
5018 */
__mem_cgroup_try_charge_swap(struct folio * folio,swp_entry_t entry)5019 int __mem_cgroup_try_charge_swap(struct folio *folio, swp_entry_t entry)
5020 {
5021 unsigned int nr_pages = folio_nr_pages(folio);
5022 struct page_counter *counter;
5023 struct mem_cgroup *memcg;
5024
5025 if (do_memsw_account())
5026 return 0;
5027
5028 memcg = folio_memcg(folio);
5029
5030 VM_WARN_ON_ONCE_FOLIO(!memcg, folio);
5031 if (!memcg)
5032 return 0;
5033
5034 if (!entry.val) {
5035 memcg_memory_event(memcg, MEMCG_SWAP_FAIL);
5036 return 0;
5037 }
5038
5039 memcg = mem_cgroup_id_get_online(memcg);
5040
5041 if (!mem_cgroup_is_root(memcg) &&
5042 !page_counter_try_charge(&memcg->swap, nr_pages, &counter)) {
5043 memcg_memory_event(memcg, MEMCG_SWAP_MAX);
5044 memcg_memory_event(memcg, MEMCG_SWAP_FAIL);
5045 mem_cgroup_id_put(memcg);
5046 return -ENOMEM;
5047 }
5048
5049 /* Get references for the tail pages, too */
5050 if (nr_pages > 1)
5051 mem_cgroup_id_get_many(memcg, nr_pages - 1);
5052 mod_memcg_state(memcg, MEMCG_SWAP, nr_pages);
5053
5054 swap_cgroup_record(folio, mem_cgroup_id(memcg), entry);
5055
5056 return 0;
5057 }
5058
5059 /**
5060 * __mem_cgroup_uncharge_swap - uncharge swap space
5061 * @entry: swap entry to uncharge
5062 * @nr_pages: the amount of swap space to uncharge
5063 */
__mem_cgroup_uncharge_swap(swp_entry_t entry,unsigned int nr_pages)5064 void __mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages)
5065 {
5066 struct mem_cgroup *memcg;
5067 unsigned short id;
5068
5069 id = swap_cgroup_clear(entry, nr_pages);
5070 rcu_read_lock();
5071 memcg = mem_cgroup_from_id(id);
5072 if (memcg) {
5073 if (!mem_cgroup_is_root(memcg)) {
5074 if (do_memsw_account())
5075 page_counter_uncharge(&memcg->memsw, nr_pages);
5076 else
5077 page_counter_uncharge(&memcg->swap, nr_pages);
5078 }
5079 mod_memcg_state(memcg, MEMCG_SWAP, -nr_pages);
5080 mem_cgroup_id_put_many(memcg, nr_pages);
5081 }
5082 rcu_read_unlock();
5083 }
5084
mem_cgroup_get_nr_swap_pages(struct mem_cgroup * memcg)5085 long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
5086 {
5087 long nr_swap_pages = get_nr_swap_pages();
5088
5089 if (mem_cgroup_disabled() || do_memsw_account())
5090 return nr_swap_pages;
5091 for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg))
5092 nr_swap_pages = min_t(long, nr_swap_pages,
5093 READ_ONCE(memcg->swap.max) -
5094 page_counter_read(&memcg->swap));
5095 return nr_swap_pages;
5096 }
5097
mem_cgroup_swap_full(struct folio * folio)5098 bool mem_cgroup_swap_full(struct folio *folio)
5099 {
5100 struct mem_cgroup *memcg;
5101
5102 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
5103
5104 if (vm_swap_full())
5105 return true;
5106 if (do_memsw_account())
5107 return false;
5108
5109 memcg = folio_memcg(folio);
5110 if (!memcg)
5111 return false;
5112
5113 for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg)) {
5114 unsigned long usage = page_counter_read(&memcg->swap);
5115
5116 if (usage * 2 >= READ_ONCE(memcg->swap.high) ||
5117 usage * 2 >= READ_ONCE(memcg->swap.max))
5118 return true;
5119 }
5120
5121 return false;
5122 }
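/*
 * Worked example for the check above (numbers are illustrative): with
 * swap.max = 1G and swap.high left at "max", the folio is considered
 * "swap full" once the cgroup's swap usage reaches 512M, i.e. at 50%
 * of the limit, in any cgroup along the ancestry; this mirrors the
 * global half-full heuristic of vm_swap_full() checked above.
 */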
5123
setup_swap_account(char * s)5124 static int __init setup_swap_account(char *s)
5125 {
5126 bool res;
5127
5128 if (!kstrtobool(s, &res) && !res)
5129 pr_warn_once("The swapaccount=0 commandline option is deprecated "
5130 "in favor of configuring swap control via cgroupfs. "
5131 "Please report your usecase to linux-mm@kvack.org if you "
5132 "depend on this functionality.\n");
5133 return 1;
5134 }
5135 __setup("swapaccount=", setup_swap_account);
5136
swap_current_read(struct cgroup_subsys_state * css,struct cftype * cft)5137 static u64 swap_current_read(struct cgroup_subsys_state *css,
5138 struct cftype *cft)
5139 {
5140 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5141
5142 return (u64)page_counter_read(&memcg->swap) * PAGE_SIZE;
5143 }
5144
swap_peak_show(struct seq_file * sf,void * v)5145 static int swap_peak_show(struct seq_file *sf, void *v)
5146 {
5147 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(sf));
5148
5149 return peak_show(sf, v, &memcg->swap);
5150 }
5151
swap_peak_write(struct kernfs_open_file * of,char * buf,size_t nbytes,loff_t off)5152 static ssize_t swap_peak_write(struct kernfs_open_file *of, char *buf,
5153 size_t nbytes, loff_t off)
5154 {
5155 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
5156
5157 return peak_write(of, buf, nbytes, off, &memcg->swap,
5158 &memcg->swap_peaks);
5159 }
5160
swap_high_show(struct seq_file * m,void * v)5161 static int swap_high_show(struct seq_file *m, void *v)
5162 {
5163 return seq_puts_memcg_tunable(m,
5164 READ_ONCE(mem_cgroup_from_seq(m)->swap.high));
5165 }
5166
swap_high_write(struct kernfs_open_file * of,char * buf,size_t nbytes,loff_t off)5167 static ssize_t swap_high_write(struct kernfs_open_file *of,
5168 char *buf, size_t nbytes, loff_t off)
5169 {
5170 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
5171 unsigned long high;
5172 int err;
5173
5174 buf = strstrip(buf);
5175 err = page_counter_memparse(buf, "max", &high);
5176 if (err)
5177 return err;
5178
5179 page_counter_set_high(&memcg->swap, high);
5180
5181 return nbytes;
5182 }
5183
swap_max_show(struct seq_file * m,void * v)5184 static int swap_max_show(struct seq_file *m, void *v)
5185 {
5186 return seq_puts_memcg_tunable(m,
5187 READ_ONCE(mem_cgroup_from_seq(m)->swap.max));
5188 }
5189
swap_max_write(struct kernfs_open_file * of,char * buf,size_t nbytes,loff_t off)5190 static ssize_t swap_max_write(struct kernfs_open_file *of,
5191 char *buf, size_t nbytes, loff_t off)
5192 {
5193 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
5194 unsigned long max;
5195 int err;
5196
5197 buf = strstrip(buf);
5198 err = page_counter_memparse(buf, "max", &max);
5199 if (err)
5200 return err;
5201
5202 xchg(&memcg->swap.max, max);
5203
5204 return nbytes;
5205 }
5206
swap_events_show(struct seq_file * m,void * v)5207 static int swap_events_show(struct seq_file *m, void *v)
5208 {
5209 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
5210
5211 seq_printf(m, "high %lu\n",
5212 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_HIGH]));
5213 seq_printf(m, "max %lu\n",
5214 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_MAX]));
5215 seq_printf(m, "fail %lu\n",
5216 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_FAIL]));
5217
5218 return 0;
5219 }
5220
5221 static struct cftype swap_files[] = {
5222 {
5223 .name = "swap.current",
5224 .flags = CFTYPE_NOT_ON_ROOT,
5225 .read_u64 = swap_current_read,
5226 },
5227 {
5228 .name = "swap.high",
5229 .flags = CFTYPE_NOT_ON_ROOT,
5230 .seq_show = swap_high_show,
5231 .write = swap_high_write,
5232 },
5233 {
5234 .name = "swap.max",
5235 .flags = CFTYPE_NOT_ON_ROOT,
5236 .seq_show = swap_max_show,
5237 .write = swap_max_write,
5238 },
5239 {
5240 .name = "swap.peak",
5241 .flags = CFTYPE_NOT_ON_ROOT,
5242 .open = peak_open,
5243 .release = peak_release,
5244 .seq_show = swap_peak_show,
5245 .write = swap_peak_write,
5246 },
5247 {
5248 .name = "swap.events",
5249 .flags = CFTYPE_NOT_ON_ROOT,
5250 .file_offset = offsetof(struct mem_cgroup, swap_events_file),
5251 .seq_show = swap_events_show,
5252 },
5253 { } /* terminate */
5254 };
5255
5256 #ifdef CONFIG_ZSWAP
5257 /**
5258 * obj_cgroup_may_zswap - check if this cgroup can zswap
5259 * @objcg: the object cgroup
5260 *
5261 * Check if the hierarchical zswap limit has been reached.
5262 *
5263 * This doesn't check for specific headroom, and it is not atomic
5264 * either. But with zswap, the size of the allocation is only known
5265 * once compression has occurred, and this optimistic pre-check avoids
5266 * spending cycles on compression when there is already no room left
5267 * or zswap is disabled altogether somewhere in the hierarchy.
5268 */
obj_cgroup_may_zswap(struct obj_cgroup * objcg)5269 bool obj_cgroup_may_zswap(struct obj_cgroup *objcg)
5270 {
5271 struct mem_cgroup *memcg, *original_memcg;
5272 bool ret = true;
5273
5274 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
5275 return true;
5276
5277 original_memcg = get_mem_cgroup_from_objcg(objcg);
5278 for (memcg = original_memcg; !mem_cgroup_is_root(memcg);
5279 memcg = parent_mem_cgroup(memcg)) {
5280 unsigned long max = READ_ONCE(memcg->zswap_max);
5281 unsigned long pages;
5282
5283 if (max == PAGE_COUNTER_MAX)
5284 continue;
5285 if (max == 0) {
5286 ret = false;
5287 break;
5288 }
5289
5290 /* Force flush to get accurate stats for charging */
5291 __mem_cgroup_flush_stats(memcg, true);
5292 pages = memcg_page_state(memcg, MEMCG_ZSWAP_B) / PAGE_SIZE;
5293 if (pages < max)
5294 continue;
5295 ret = false;
5296 break;
5297 }
5298 mem_cgroup_put(original_memcg);
5299 return ret;
5300 }
5301
5302 /**
5303 * obj_cgroup_charge_zswap - charge compression backend memory
5304 * @objcg: the object cgroup
5305 * @size: size of compressed object
5306 *
5307 * This forces the charge after obj_cgroup_may_zswap() allowed
5308 * compression and storage in zswap for this cgroup to go ahead.
5309 */
void obj_cgroup_charge_zswap(struct obj_cgroup *objcg, size_t size)
{
	struct mem_cgroup *memcg;

	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
		return;

	VM_WARN_ON_ONCE(!(current->flags & PF_MEMALLOC));

	/* PF_MEMALLOC context, charging must succeed */
	if (obj_cgroup_charge(objcg, GFP_KERNEL, size))
		VM_WARN_ON_ONCE(1);

	rcu_read_lock();
	memcg = obj_cgroup_memcg(objcg);
	mod_memcg_state(memcg, MEMCG_ZSWAP_B, size);
	mod_memcg_state(memcg, MEMCG_ZSWAPPED, 1);
	rcu_read_unlock();
}

/**
 * obj_cgroup_uncharge_zswap - uncharge compression backend memory
 * @objcg: the object cgroup
 * @size: size of compressed object
 *
 * Uncharges zswap memory on page in.
 */
void obj_cgroup_uncharge_zswap(struct obj_cgroup *objcg, size_t size)
{
	struct mem_cgroup *memcg;

	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
		return;

	obj_cgroup_uncharge(objcg, size);

	rcu_read_lock();
	memcg = obj_cgroup_memcg(objcg);
	mod_memcg_state(memcg, MEMCG_ZSWAP_B, -size);
	mod_memcg_state(memcg, MEMCG_ZSWAPPED, -1);
	rcu_read_unlock();
}

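/*
 * Whether zswap is allowed to write stored pages back to the backing swap
 * device for this memcg: writeback is refused if the memcg or any ancestor
 * has cleared memory.zswap.writeback, unless zswap itself is disabled, in
 * which case pages must still be able to reach the swap device.
 */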
bool mem_cgroup_zswap_writeback_enabled(struct mem_cgroup *memcg)
{
	/* if zswap is disabled, do not block pages going to the swapping device */
	if (!zswap_is_enabled())
		return true;

	for (; memcg; memcg = parent_mem_cgroup(memcg))
		if (!READ_ONCE(memcg->zswap_writeback))
			return false;

	return true;
}

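/*
 * Handlers for the per-memcg zswap tunables: memory.zswap.current (bytes of
 * compressed backend memory), memory.zswap.max (the hierarchical limit
 * consulted by obj_cgroup_may_zswap()) and memory.zswap.writeback (0/1).
 */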
static u64 zswap_current_read(struct cgroup_subsys_state *css,
			      struct cftype *cft)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);

	mem_cgroup_flush_stats(memcg);
	return memcg_page_state(memcg, MEMCG_ZSWAP_B);
}

static int zswap_max_show(struct seq_file *m, void *v)
{
	return seq_puts_memcg_tunable(m,
		READ_ONCE(mem_cgroup_from_seq(m)->zswap_max));
}

static ssize_t zswap_max_write(struct kernfs_open_file *of,
			       char *buf, size_t nbytes, loff_t off)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
	unsigned long max;
	int err;

	buf = strstrip(buf);
	err = page_counter_memparse(buf, "max", &max);
	if (err)
		return err;

	xchg(&memcg->zswap_max, max);

	return nbytes;
}

static int zswap_writeback_show(struct seq_file *m, void *v)
{
	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);

	seq_printf(m, "%d\n", READ_ONCE(memcg->zswap_writeback));
	return 0;
}

static ssize_t zswap_writeback_write(struct kernfs_open_file *of,
				     char *buf, size_t nbytes, loff_t off)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
	int zswap_writeback;
	ssize_t parse_ret = kstrtoint(strstrip(buf), 0, &zswap_writeback);

	if (parse_ret)
		return parse_ret;

	if (zswap_writeback != 0 && zswap_writeback != 1)
		return -EINVAL;

	WRITE_ONCE(memcg->zswap_writeback, zswap_writeback);
	return nbytes;
}

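/*
 * cgroup v2 zswap control files ("memory.zswap.*"). Unlike the others,
 * zswap.writeback carries no CFTYPE_NOT_ON_ROOT flag and is therefore also
 * created on the root cgroup.
 */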
static struct cftype zswap_files[] = {
	{
		.name = "zswap.current",
		.flags = CFTYPE_NOT_ON_ROOT,
		.read_u64 = zswap_current_read,
	},
	{
		.name = "zswap.max",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = zswap_max_show,
		.write = zswap_max_write,
	},
	{
		.name = "zswap.writeback",
		.seq_show = zswap_writeback_show,
		.write = zswap_writeback_write,
	},
	{ } /* terminate */
};
#endif /* CONFIG_ZSWAP */

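/*
 * Register the swap control files once the memory controller is enabled:
 * the v2 swap.* files, the legacy (v1) memsw files when CONFIG_MEMCG_V1 is
 * set, and the zswap.* files when CONFIG_ZSWAP is set.
 */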
static int __init mem_cgroup_swap_init(void)
{
	if (mem_cgroup_disabled())
		return 0;

	WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys, swap_files));
#ifdef CONFIG_MEMCG_V1
	WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys, memsw_files));
#endif
#ifdef CONFIG_ZSWAP
	WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys, zswap_files));
#endif
	return 0;
}
subsys_initcall(mem_cgroup_swap_init);

#endif /* CONFIG_SWAP */