// SPDX-License-Identifier: GPL-2.0-or-later
/* memcontrol.c - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * Memory thresholds
 * Copyright (C) 2009 Nokia Corporation
 * Author: Kirill A. Shutemov
 *
 * Kernel Memory Controller
 * Copyright (C) 2012 Parallels Inc. and Google Inc.
 * Authors: Glauber Costa and Suleiman Souhlal
 *
 * Native page reclaim
 * Charge lifetime sanitation
 * Lockless page tracking & accounting
 * Unified hierarchy configuration model
 * Copyright (C) 2015 Red Hat, Inc., Johannes Weiner
 *
 * Per memcg lru locking
 * Copyright (C) 2020 Alibaba, Inc, Alex Shi
 */

#include <linux/cgroup-defs.h>
#include <linux/page_counter.h>
#include <linux/memcontrol.h>
#include <linux/cgroup.h>
#include <linux/cpuset.h>
#include <linux/sched/mm.h>
#include <linux/shmem_fs.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/vm_event_item.h>
#include <linux/smp.h>
#include <linux/page-flags.h>
#include <linux/backing-dev.h>
#include <linux/bit_spinlock.h>
#include <linux/rcupdate.h>
#include <linux/limits.h>
#include <linux/export.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/swapops.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/vmpressure.h>
#include <linux/memremap.h>
#include <linux/mm_inline.h>
#include <linux/swap_cgroup.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/lockdep.h>
#include <linux/resume_user_mode.h>
#include <linux/psi.h>
#include <linux/seq_buf.h>
#include <linux/sched/isolation.h>
#include <linux/kmemleak.h>
#include "internal.h"
#include <net/sock.h>
#include <net/ip.h>
#include "slab.h"
#include "memcontrol-v1.h"

#include <linux/uaccess.h>

#define CREATE_TRACE_POINTS
#include <trace/events/memcg.h>
#undef CREATE_TRACE_POINTS

#include <trace/events/vmscan.h>

struct cgroup_subsys memory_cgrp_subsys __read_mostly;
EXPORT_SYMBOL(memory_cgrp_subsys);

struct mem_cgroup *root_mem_cgroup __read_mostly;
EXPORT_SYMBOL(root_mem_cgroup);

/* Active memory cgroup to use from an interrupt context */
DEFINE_PER_CPU(struct mem_cgroup *, int_active_memcg);
EXPORT_PER_CPU_SYMBOL_GPL(int_active_memcg);

/* Socket memory accounting disabled? */
static bool cgroup_memory_nosocket __ro_after_init;

/* Kernel memory accounting disabled? */
static bool cgroup_memory_nokmem __ro_after_init;

/* BPF memory accounting disabled? */
static bool cgroup_memory_nobpf __ro_after_init;

static struct kmem_cache *memcg_cachep;
static struct kmem_cache *memcg_pn_cachep;

#ifdef CONFIG_CGROUP_WRITEBACK
static DECLARE_WAIT_QUEUE_HEAD(memcg_cgwb_frn_waitq);
#endif

static inline bool task_is_dying(void)
{
	return tsk_is_oom_victim(current) || fatal_signal_pending(current) ||
		(current->flags & PF_EXITING);
}

/* Some nice accessors for the vmpressure. */
struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
{
	if (!memcg)
		memcg = root_mem_cgroup;
	return &memcg->vmpressure;
}

struct mem_cgroup *vmpressure_to_memcg(struct vmpressure *vmpr)
{
	return container_of(vmpr, struct mem_cgroup, vmpressure);
}

#define SEQ_BUF_SIZE SZ_4K
#define CURRENT_OBJCG_UPDATE_BIT 0
#define CURRENT_OBJCG_UPDATE_FLAG (1UL << CURRENT_OBJCG_UPDATE_BIT)

static DEFINE_SPINLOCK(objcg_lock);

bool mem_cgroup_kmem_disabled(void)
{
	return cgroup_memory_nokmem;
}

static void memcg_uncharge(struct mem_cgroup *memcg, unsigned int nr_pages);

static void obj_cgroup_release(struct percpu_ref *ref)
{
	struct obj_cgroup *objcg = container_of(ref, struct obj_cgroup, refcnt);
	unsigned int nr_bytes;
	unsigned int nr_pages;
	unsigned long flags;

	/*
	 * At this point all allocated objects are freed, and
	 * objcg->nr_charged_bytes can't have an arbitrary byte value.
	 * However, it can be PAGE_SIZE or (x * PAGE_SIZE).
	 *
	 * The following sequence can lead to it:
	 * 1) CPU0: objcg == stock->cached_objcg
	 * 2) CPU1: we do a small allocation (e.g. 92 bytes),
	 *    PAGE_SIZE bytes are charged
	 * 3) CPU1: a process from another memcg is allocating something,
	 *    the stock is flushed,
	 *    objcg->nr_charged_bytes = PAGE_SIZE - 92
	 * 4) CPU0: we release this object,
	 *    92 bytes are added to stock->nr_bytes
	 * 5) CPU0: stock is flushed,
	 *    92 bytes are added to objcg->nr_charged_bytes
	 *
	 * As a result, nr_charged_bytes == PAGE_SIZE.
	 * This page will be uncharged in obj_cgroup_release().
	 */
	nr_bytes = atomic_read(&objcg->nr_charged_bytes);
	WARN_ON_ONCE(nr_bytes & (PAGE_SIZE - 1));
	nr_pages = nr_bytes >> PAGE_SHIFT;

	if (nr_pages) {
		struct mem_cgroup *memcg;

		memcg = get_mem_cgroup_from_objcg(objcg);
		mod_memcg_state(memcg, MEMCG_KMEM, -nr_pages);
		memcg1_account_kmem(memcg, -nr_pages);
		if (!mem_cgroup_is_root(memcg))
			memcg_uncharge(memcg, nr_pages);
		mem_cgroup_put(memcg);
	}

	spin_lock_irqsave(&objcg_lock, flags);
	list_del(&objcg->list);
	spin_unlock_irqrestore(&objcg_lock, flags);

	percpu_ref_exit(ref);
	kfree_rcu(objcg, rcu);
}

static struct obj_cgroup *obj_cgroup_alloc(void)
{
	struct obj_cgroup *objcg;
	int ret;

	objcg = kzalloc(sizeof(struct obj_cgroup), GFP_KERNEL);
	if (!objcg)
		return NULL;

	ret = percpu_ref_init(&objcg->refcnt, obj_cgroup_release, 0,
			      GFP_KERNEL);
	if (ret) {
		kfree(objcg);
		return NULL;
	}
	INIT_LIST_HEAD(&objcg->list);
	return objcg;
}

static void memcg_reparent_objcgs(struct mem_cgroup *memcg,
				  struct mem_cgroup *parent)
{
	struct obj_cgroup *objcg, *iter;

	objcg = rcu_replace_pointer(memcg->objcg, NULL, true);

	spin_lock_irq(&objcg_lock);

	/* 1) Ready to reparent active objcg. */
	list_add(&objcg->list, &memcg->objcg_list);
	/* 2) Reparent active objcg and already reparented objcgs to parent. */
	list_for_each_entry(iter, &memcg->objcg_list, list)
		WRITE_ONCE(iter->memcg, parent);
	/* 3) Move already reparented objcgs to the parent's list */
	list_splice(&memcg->objcg_list, &parent->objcg_list);

	spin_unlock_irq(&objcg_lock);

	percpu_ref_kill(&objcg->refcnt);
}

/*
 * A lot of the calls to the cache allocation functions are expected to be
 * inlined by the compiler. Since the calls to memcg_slab_post_alloc_hook() are
 * conditional to this static branch, we'll have to allow modules that do
 * kmem_cache_alloc and the like to see this symbol as well.
 */
DEFINE_STATIC_KEY_FALSE(memcg_kmem_online_key);
EXPORT_SYMBOL(memcg_kmem_online_key);

DEFINE_STATIC_KEY_FALSE(memcg_bpf_enabled_key);
EXPORT_SYMBOL(memcg_bpf_enabled_key);

/**
 * mem_cgroup_css_from_folio - css of the memcg associated with a folio
 * @folio: folio of interest
 *
 * If memcg is bound to the default hierarchy, css of the memcg associated
 * with @folio is returned. The returned css remains associated with @folio
 * until it is released.
 *
 * If memcg is bound to a traditional hierarchy, the css of root_mem_cgroup
 * is returned.
 */
struct cgroup_subsys_state *mem_cgroup_css_from_folio(struct folio *folio)
{
	struct mem_cgroup *memcg = folio_memcg(folio);

	if (!memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
		memcg = root_mem_cgroup;

	return &memcg->css;
}

/**
 * page_cgroup_ino - return inode number of the memcg a page is charged to
 * @page: the page
 *
 * Look up the closest online ancestor of the memory cgroup @page is charged to
 * and return its inode number or 0 if @page is not charged to any cgroup. It
 * is safe to call this function without holding a reference to @page.
 *
 * Note, this function is inherently racy, because there is nothing to prevent
 * the cgroup inode from getting torn down and potentially reallocated a moment
 * after page_cgroup_ino() returns, so it only should be used by callers that
 * do not care (such as procfs interfaces).
 */
ino_t page_cgroup_ino(struct page *page)
{
	struct mem_cgroup *memcg;
	unsigned long ino = 0;

	rcu_read_lock();
	/* page_folio() is racy here, but the entire function is racy anyway */
	memcg = folio_memcg_check(page_folio(page));

	while (memcg && !(memcg->css.flags & CSS_ONLINE))
		memcg = parent_mem_cgroup(memcg);
	if (memcg)
		ino = cgroup_ino(memcg->css.cgroup);
	rcu_read_unlock();
	return ino;
}
EXPORT_SYMBOL_GPL(page_cgroup_ino);

/* Subset of node_stat_item for memcg stats */
static const unsigned int memcg_node_stat_items[] = {
	NR_INACTIVE_ANON,
	NR_ACTIVE_ANON,
	NR_INACTIVE_FILE,
	NR_ACTIVE_FILE,
	NR_UNEVICTABLE,
	NR_SLAB_RECLAIMABLE_B,
	NR_SLAB_UNRECLAIMABLE_B,
	WORKINGSET_REFAULT_ANON,
	WORKINGSET_REFAULT_FILE,
	WORKINGSET_ACTIVATE_ANON,
	WORKINGSET_ACTIVATE_FILE,
	WORKINGSET_RESTORE_ANON,
	WORKINGSET_RESTORE_FILE,
	WORKINGSET_NODERECLAIM,
	NR_ANON_MAPPED,
	NR_FILE_MAPPED,
	NR_FILE_PAGES,
	NR_FILE_DIRTY,
	NR_WRITEBACK,
	NR_SHMEM,
	NR_SHMEM_THPS,
	NR_FILE_THPS,
	NR_ANON_THPS,
	NR_KERNEL_STACK_KB,
	NR_PAGETABLE,
	NR_SECONDARY_PAGETABLE,
#ifdef CONFIG_SWAP
	NR_SWAPCACHE,
#endif
#ifdef CONFIG_NUMA_BALANCING
	PGPROMOTE_SUCCESS,
#endif
	PGDEMOTE_KSWAPD,
	PGDEMOTE_DIRECT,
	PGDEMOTE_KHUGEPAGED,
	PGDEMOTE_PROACTIVE,
#ifdef CONFIG_HUGETLB_PAGE
	NR_HUGETLB,
#endif
};

static const unsigned int memcg_stat_items[] = {
	MEMCG_SWAP,
	MEMCG_SOCK,
	MEMCG_PERCPU_B,
	MEMCG_VMALLOC,
	MEMCG_KMEM,
	MEMCG_ZSWAP_B,
	MEMCG_ZSWAPPED,
};

#define NR_MEMCG_NODE_STAT_ITEMS ARRAY_SIZE(memcg_node_stat_items)
#define MEMCG_VMSTAT_SIZE (NR_MEMCG_NODE_STAT_ITEMS + \
			   ARRAY_SIZE(memcg_stat_items))
#define BAD_STAT_IDX(index) ((u32)(index) >= U8_MAX)
static u8 mem_cgroup_stats_index[MEMCG_NR_STAT] __read_mostly;

static void init_memcg_stats(void)
{
	u8 i, j = 0;

	BUILD_BUG_ON(MEMCG_NR_STAT >= U8_MAX);

	memset(mem_cgroup_stats_index, U8_MAX, sizeof(mem_cgroup_stats_index));

	for (i = 0; i < NR_MEMCG_NODE_STAT_ITEMS; ++i, ++j)
		mem_cgroup_stats_index[memcg_node_stat_items[i]] = j;

	for (i = 0; i < ARRAY_SIZE(memcg_stat_items); ++i, ++j)
		mem_cgroup_stats_index[memcg_stat_items[i]] = j;
}

static inline int memcg_stats_index(int idx)
{
	return mem_cgroup_stats_index[idx];
}

struct lruvec_stats_percpu {
	/* Local (CPU and cgroup) state */
	long state[NR_MEMCG_NODE_STAT_ITEMS];

	/* Delta calculation for lockless upward propagation */
	long state_prev[NR_MEMCG_NODE_STAT_ITEMS];
};

struct lruvec_stats {
	/* Aggregated (CPU and subtree) state */
	long state[NR_MEMCG_NODE_STAT_ITEMS];

	/* Non-hierarchical (CPU aggregated) state */
	long state_local[NR_MEMCG_NODE_STAT_ITEMS];

	/* Pending child counts during tree propagation */
	long state_pending[NR_MEMCG_NODE_STAT_ITEMS];
};

unsigned long lruvec_page_state(struct lruvec *lruvec, enum node_stat_item idx)
{
	struct mem_cgroup_per_node *pn;
	long x;
	int i;

	if (mem_cgroup_disabled())
		return node_page_state(lruvec_pgdat(lruvec), idx);

	i = memcg_stats_index(idx);
	if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx))
		return 0;

	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	x = READ_ONCE(pn->lruvec_stats->state[i]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

unsigned long lruvec_page_state_local(struct lruvec *lruvec,
				      enum node_stat_item idx)
{
	struct mem_cgroup_per_node *pn;
	long x;
	int i;

	if (mem_cgroup_disabled())
		return node_page_state(lruvec_pgdat(lruvec), idx);

	i = memcg_stats_index(idx);
	if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx))
		return 0;

	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	x = READ_ONCE(pn->lruvec_stats->state_local[i]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

/* Subset of vm_event_item to report for memcg event stats */
static const unsigned int memcg_vm_event_stat[] = {
#ifdef CONFIG_MEMCG_V1
	PGPGIN,
	PGPGOUT,
#endif
	PSWPIN,
	PSWPOUT,
	PGSCAN_KSWAPD,
	PGSCAN_DIRECT,
	PGSCAN_KHUGEPAGED,
	PGSCAN_PROACTIVE,
	PGSTEAL_KSWAPD,
	PGSTEAL_DIRECT,
	PGSTEAL_KHUGEPAGED,
	PGSTEAL_PROACTIVE,
	PGFAULT,
	PGMAJFAULT,
	PGREFILL,
	PGACTIVATE,
	PGDEACTIVATE,
	PGLAZYFREE,
	PGLAZYFREED,
#ifdef CONFIG_SWAP
	SWPIN_ZERO,
	SWPOUT_ZERO,
#endif
#ifdef CONFIG_ZSWAP
	ZSWPIN,
	ZSWPOUT,
	ZSWPWB,
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	THP_FAULT_ALLOC,
	THP_COLLAPSE_ALLOC,
	THP_SWPOUT,
	THP_SWPOUT_FALLBACK,
#endif
#ifdef CONFIG_NUMA_BALANCING
	NUMA_PAGE_MIGRATE,
	NUMA_PTE_UPDATES,
	NUMA_HINT_FAULTS,
#endif
};

#define NR_MEMCG_EVENTS ARRAY_SIZE(memcg_vm_event_stat)
static u8 mem_cgroup_events_index[NR_VM_EVENT_ITEMS] __read_mostly;

static void init_memcg_events(void)
{
	u8 i;

	BUILD_BUG_ON(NR_VM_EVENT_ITEMS >= U8_MAX);

	memset(mem_cgroup_events_index, U8_MAX,
	       sizeof(mem_cgroup_events_index));

	for (i = 0; i < NR_MEMCG_EVENTS; ++i)
		mem_cgroup_events_index[memcg_vm_event_stat[i]] = i;
}

static inline int memcg_events_index(enum vm_event_item idx)
{
	return mem_cgroup_events_index[idx];
}

struct memcg_vmstats_percpu {
	/* Stats updates since the last flush */
	unsigned int stats_updates;

	/* Cached pointers for fast iteration in memcg_rstat_updated() */
	struct memcg_vmstats_percpu __percpu *parent_pcpu;
	struct memcg_vmstats *vmstats;

	/* The above should fit a single cacheline for memcg_rstat_updated() */

	/* Local (CPU and cgroup) page state & events */
	long state[MEMCG_VMSTAT_SIZE];
	unsigned long events[NR_MEMCG_EVENTS];

	/* Delta calculation for lockless upward propagation */
	long state_prev[MEMCG_VMSTAT_SIZE];
	unsigned long events_prev[NR_MEMCG_EVENTS];
} ____cacheline_aligned;

struct memcg_vmstats {
	/* Aggregated (CPU and subtree) page state & events */
	long state[MEMCG_VMSTAT_SIZE];
	unsigned long events[NR_MEMCG_EVENTS];

	/* Non-hierarchical (CPU aggregated) page state & events */
	long state_local[MEMCG_VMSTAT_SIZE];
	unsigned long events_local[NR_MEMCG_EVENTS];

	/* Pending child counts during tree propagation */
	long state_pending[MEMCG_VMSTAT_SIZE];
	unsigned long events_pending[NR_MEMCG_EVENTS];

	/* Stats updates since the last flush */
	atomic_t stats_updates;
};

/*
 * memcg and lruvec stats flushing
 *
 * Many codepaths leading to stats update or read are performance sensitive and
 * adding stats flushing in such codepaths is not desirable. So, to optimize the
 * flushing, the kernel does the following:
 *
 * 1) Periodically and asynchronously flush the stats every 2 seconds so that
 *    the rstat update tree does not grow unbounded.
 *
 * 2) Flush the stats synchronously on the reader side only when there are more
 *    than (MEMCG_CHARGE_BATCH * nr_cpus) update events. This can leave the
 *    stats out of sync by at most (MEMCG_CHARGE_BATCH * nr_cpus) updates, but
 *    only for 2 seconds due to (1).
 */
static void flush_memcg_stats_dwork(struct work_struct *w);
static DECLARE_DEFERRABLE_WORK(stats_flush_dwork, flush_memcg_stats_dwork);
static u64 flush_last_time;

#define FLUSH_TIME (2UL*HZ)

static bool memcg_vmstats_needs_flush(struct memcg_vmstats *vmstats)
{
	return atomic_read(&vmstats->stats_updates) >
		MEMCG_CHARGE_BATCH * num_online_cpus();
}

static inline void memcg_rstat_updated(struct mem_cgroup *memcg, int val,
					int cpu)
{
	struct memcg_vmstats_percpu __percpu *statc_pcpu;
	struct memcg_vmstats_percpu *statc;
	unsigned int stats_updates;

	if (!val)
		return;

	css_rstat_updated(&memcg->css, cpu);
	statc_pcpu = memcg->vmstats_percpu;
	for (; statc_pcpu; statc_pcpu = statc->parent_pcpu) {
		statc = this_cpu_ptr(statc_pcpu);
		/*
		 * If @memcg is already flushable then all its ancestors are
		 * flushable as well and also there is no need to increase
		 * stats_updates.
		 */
		if (memcg_vmstats_needs_flush(statc->vmstats))
			break;

		stats_updates = this_cpu_add_return(statc_pcpu->stats_updates,
						    abs(val));
		if (stats_updates < MEMCG_CHARGE_BATCH)
			continue;

		stats_updates = this_cpu_xchg(statc_pcpu->stats_updates, 0);
		atomic_add(stats_updates, &statc->vmstats->stats_updates);
	}
}

static void __mem_cgroup_flush_stats(struct mem_cgroup *memcg, bool force)
{
	bool needs_flush = memcg_vmstats_needs_flush(memcg->vmstats);

	trace_memcg_flush_stats(memcg, atomic_read(&memcg->vmstats->stats_updates),
				force, needs_flush);

	if (!force && !needs_flush)
		return;

	if (mem_cgroup_is_root(memcg))
		WRITE_ONCE(flush_last_time, jiffies_64);

	css_rstat_flush(&memcg->css);
}

/*
 * mem_cgroup_flush_stats - flush the stats of a memory cgroup subtree
 * @memcg: root of the subtree to flush
 *
 * Flushing is serialized by the underlying global rstat lock. There is also a
 * minimum amount of work to be done even if there are no stat updates to flush.
 * Hence, we only flush the stats if the updates delta exceeds a threshold. This
 * avoids unnecessary work and contention on the underlying lock.
 */
void mem_cgroup_flush_stats(struct mem_cgroup *memcg)
{
	if (mem_cgroup_disabled())
		return;

	if (!memcg)
		memcg = root_mem_cgroup;

	__mem_cgroup_flush_stats(memcg, false);
}

void mem_cgroup_flush_stats_ratelimited(struct mem_cgroup *memcg)
{
	/* Only flush if the periodic flusher is one full cycle late */
	if (time_after64(jiffies_64, READ_ONCE(flush_last_time) + 2*FLUSH_TIME))
		mem_cgroup_flush_stats(memcg);
}

static void flush_memcg_stats_dwork(struct work_struct *w)
{
	/*
	 * Deliberately ignore memcg_vmstats_needs_flush() here so that flushing
	 * in latency-sensitive paths is as cheap as possible.
	 */
	__mem_cgroup_flush_stats(root_mem_cgroup, true);
	queue_delayed_work(system_unbound_wq, &stats_flush_dwork, FLUSH_TIME);
}
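
/*
 * A worked example of the flush threshold above (illustrative numbers only;
 * assuming the MEMCG_CHARGE_BATCH value of 64 from memcontrol.h): with 8
 * online CPUs, memcg_vmstats_needs_flush() reports true once more than
 * 64 * 8 = 512 page-normalized updates have accumulated in stats_updates
 * since the last flush. A reader may therefore observe stats that are stale
 * by up to roughly that many updates, and by no more than about FLUSH_TIME
 * (2 seconds) thanks to the periodic flusher above.
 */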

unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
{
	long x;
	int i = memcg_stats_index(idx);

	if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx))
		return 0;

	x = READ_ONCE(memcg->vmstats->state[i]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static int memcg_page_state_unit(int item);

/*
 * Normalize the value passed into memcg_rstat_updated() to be in pages. Round
 * up non-zero sub-page updates to 1 page as zero page updates are ignored.
 */
static int memcg_state_val_in_pages(int idx, int val)
{
	int unit = memcg_page_state_unit(idx);

	if (!val || unit == PAGE_SIZE)
		return val;
	else
		return max(val * unit / PAGE_SIZE, 1UL);
}

/**
 * mod_memcg_state - update cgroup memory statistics
 * @memcg: the memory cgroup
 * @idx: the stat item - can be enum memcg_stat_item or enum node_stat_item
 * @val: delta to add to the counter, can be negative
 */
void mod_memcg_state(struct mem_cgroup *memcg, enum memcg_stat_item idx,
		     int val)
{
	int i = memcg_stats_index(idx);
	int cpu;

	if (mem_cgroup_disabled())
		return;

	if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx))
		return;

	cpu = get_cpu();

	this_cpu_add(memcg->vmstats_percpu->state[i], val);
	val = memcg_state_val_in_pages(idx, val);
	memcg_rstat_updated(memcg, val, cpu);
	trace_mod_memcg_state(memcg, idx, val);

	put_cpu();
}

#ifdef CONFIG_MEMCG_V1
/* idx can be of type enum memcg_stat_item or node_stat_item. */
unsigned long memcg_page_state_local(struct mem_cgroup *memcg, int idx)
{
	long x;
	int i = memcg_stats_index(idx);

	if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx))
		return 0;

	x = READ_ONCE(memcg->vmstats->state_local[i]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}
#endif

static void mod_memcg_lruvec_state(struct lruvec *lruvec,
				   enum node_stat_item idx,
				   int val)
{
	struct mem_cgroup_per_node *pn;
	struct mem_cgroup *memcg;
	int i = memcg_stats_index(idx);
	int cpu;

	if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx))
		return;

	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	memcg = pn->memcg;

	cpu = get_cpu();

	/* Update memcg */
	this_cpu_add(memcg->vmstats_percpu->state[i], val);

	/* Update lruvec */
	this_cpu_add(pn->lruvec_stats_percpu->state[i], val);

	val = memcg_state_val_in_pages(idx, val);
	memcg_rstat_updated(memcg, val, cpu);
	trace_mod_memcg_lruvec_state(memcg, idx, val);

	put_cpu();
}

/**
 * __mod_lruvec_state - update lruvec memory statistics
 * @lruvec: the lruvec
 * @idx: the stat item
 * @val: delta to add to the counter, can be negative
 *
 * The lruvec is the intersection of the NUMA node and a cgroup. This
 * function updates all three counters that are affected by a
 * change of state at this level: per-node, per-cgroup, per-lruvec.
 */
void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
			int val)
{
	/* Update node */
	__mod_node_page_state(lruvec_pgdat(lruvec), idx, val);

	/* Update memcg and lruvec */
	if (!mem_cgroup_disabled())
		mod_memcg_lruvec_state(lruvec, idx, val);
}
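
/*
 * Illustration only (hypothetical caller, not taken from this file): a call
 * such as
 *
 *	__mod_lruvec_state(lruvec, NR_FILE_DIRTY, 1);
 *
 * bumps NR_FILE_DIRTY in all three places described above: the per-node
 * vmstat of lruvec_pgdat(lruvec), the owning memcg's vmstats, and the
 * per-lruvec (memcg + node) counters, keeping the three views consistent.
 */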

void __lruvec_stat_mod_folio(struct folio *folio, enum node_stat_item idx,
			     int val)
{
	struct mem_cgroup *memcg;
	pg_data_t *pgdat = folio_pgdat(folio);
	struct lruvec *lruvec;

	rcu_read_lock();
	memcg = folio_memcg(folio);
	/* Untracked pages have no memcg, no lruvec. Update only the node */
	if (!memcg) {
		rcu_read_unlock();
		__mod_node_page_state(pgdat, idx, val);
		return;
	}

	lruvec = mem_cgroup_lruvec(memcg, pgdat);
	__mod_lruvec_state(lruvec, idx, val);
	rcu_read_unlock();
}
EXPORT_SYMBOL(__lruvec_stat_mod_folio);

void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val)
{
	pg_data_t *pgdat = page_pgdat(virt_to_page(p));
	struct mem_cgroup *memcg;
	struct lruvec *lruvec;

	rcu_read_lock();
	memcg = mem_cgroup_from_slab_obj(p);

	/*
	 * Untracked pages have no memcg, no lruvec. Update only the
	 * node. If we reparent the slab objects to the root memcg,
	 * when we free the slab object, we need to update the per-memcg
	 * vmstats to keep it correct for the root memcg.
	 */
	if (!memcg) {
		__mod_node_page_state(pgdat, idx, val);
	} else {
		lruvec = mem_cgroup_lruvec(memcg, pgdat);
		__mod_lruvec_state(lruvec, idx, val);
	}
	rcu_read_unlock();
}

/**
 * count_memcg_events - account VM events in a cgroup
 * @memcg: the memory cgroup
 * @idx: the event item
 * @count: the number of events that occurred
 */
void count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
			unsigned long count)
{
	int i = memcg_events_index(idx);
	int cpu;

	if (mem_cgroup_disabled())
		return;

	if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx))
		return;

	cpu = get_cpu();

	this_cpu_add(memcg->vmstats_percpu->events[i], count);
	memcg_rstat_updated(memcg, count, cpu);
	trace_count_memcg_events(memcg, idx, count);

	put_cpu();
}

unsigned long memcg_events(struct mem_cgroup *memcg, int event)
{
	int i = memcg_events_index(event);

	if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, event))
		return 0;

	return READ_ONCE(memcg->vmstats->events[i]);
}

#ifdef CONFIG_MEMCG_V1
unsigned long memcg_events_local(struct mem_cgroup *memcg, int event)
{
	int i = memcg_events_index(event);

	if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, event))
		return 0;

	return READ_ONCE(memcg->vmstats->events_local[i]);
}
#endif

struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
{
	/*
	 * mm_update_next_owner() may clear mm->owner to NULL
	 * if it races with swapoff, page migration, etc.
	 * So this can be called with p == NULL.
	 */
	if (unlikely(!p))
		return NULL;

	return mem_cgroup_from_css(task_css(p, memory_cgrp_id));
}
EXPORT_SYMBOL(mem_cgroup_from_task);

static __always_inline struct mem_cgroup *active_memcg(void)
{
	if (!in_task())
		return this_cpu_read(int_active_memcg);
	else
		return current->active_memcg;
}

/**
 * get_mem_cgroup_from_mm: Obtain a reference on given mm_struct's memcg.
 * @mm: mm from which memcg should be extracted. It can be NULL.
 *
 * Obtain a reference on mm->memcg and return it if successful. If mm
 * is NULL, then the memcg is chosen as follows:
 * 1) The active memcg, if set.
 * 2) current->mm->memcg, if available
 * 3) root memcg
 * If mem_cgroup is disabled, NULL is returned.
 */
struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return NULL;

	/*
	 * Page cache insertions can happen without an
	 * actual mm context, e.g. during disk probing
	 * on boot, loopback IO, acct() writes etc.
	 *
	 * No need to css_get on root memcg as the reference
	 * counting is disabled on the root level in the
	 * cgroup core. See CSS_NO_REF.
	 */
	if (unlikely(!mm)) {
		memcg = active_memcg();
		if (unlikely(memcg)) {
			/* remote memcg must hold a ref */
			css_get(&memcg->css);
			return memcg;
		}
		mm = current->mm;
		if (unlikely(!mm))
			return root_mem_cgroup;
	}

	rcu_read_lock();
	do {
		memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
		if (unlikely(!memcg))
			memcg = root_mem_cgroup;
	} while (!css_tryget(&memcg->css));
	rcu_read_unlock();
	return memcg;
}
EXPORT_SYMBOL(get_mem_cgroup_from_mm);

/**
 * get_mem_cgroup_from_current - Obtain a reference on current task's memcg.
 */
struct mem_cgroup *get_mem_cgroup_from_current(void)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return NULL;

again:
	rcu_read_lock();
	memcg = mem_cgroup_from_task(current);
	if (!css_tryget(&memcg->css)) {
		rcu_read_unlock();
		goto again;
	}
	rcu_read_unlock();
	return memcg;
}

/**
 * get_mem_cgroup_from_folio - Obtain a reference on a given folio's memcg.
 * @folio: folio from which memcg should be extracted.
 */
struct mem_cgroup *get_mem_cgroup_from_folio(struct folio *folio)
{
	struct mem_cgroup *memcg = folio_memcg(folio);

	if (mem_cgroup_disabled())
		return NULL;

	rcu_read_lock();
	if (!memcg || WARN_ON_ONCE(!css_tryget(&memcg->css)))
		memcg = root_mem_cgroup;
	rcu_read_unlock();
	return memcg;
}

/**
 * mem_cgroup_iter - iterate over memory cgroup hierarchy
 * @root: hierarchy root
 * @prev: previously returned memcg, NULL on first invocation
 * @reclaim: cookie for shared reclaim walks, NULL for full walks
 *
 * Returns references to children of the hierarchy below @root, or
 * @root itself, or %NULL after a full round-trip.
 *
 * Caller must pass the return value in @prev on subsequent
 * invocations for reference counting, or use mem_cgroup_iter_break()
 * to cancel a hierarchy walk before the round-trip is complete.
 *
 * Reclaimers can specify a node in @reclaim to divide up the memcgs
 * in the hierarchy among all concurrent reclaimers operating on the
 * same node.
 */
struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
				   struct mem_cgroup *prev,
				   struct mem_cgroup_reclaim_cookie *reclaim)
{
	struct mem_cgroup_reclaim_iter *iter;
	struct cgroup_subsys_state *css;
	struct mem_cgroup *pos;
	struct mem_cgroup *next;

	if (mem_cgroup_disabled())
		return NULL;

	if (!root)
		root = root_mem_cgroup;

	rcu_read_lock();
restart:
	next = NULL;

	if (reclaim) {
		int gen;
		int nid = reclaim->pgdat->node_id;

		iter = &root->nodeinfo[nid]->iter;
		gen = atomic_read(&iter->generation);

		/*
		 * On start, join the current reclaim iteration cycle.
		 * Exit when a concurrent walker completes it.
		 */
		if (!prev)
			reclaim->generation = gen;
		else if (reclaim->generation != gen)
			goto out_unlock;

		pos = READ_ONCE(iter->position);
	} else
		pos = prev;

	css = pos ? &pos->css : NULL;

	while ((css = css_next_descendant_pre(css, &root->css))) {
		/*
		 * Verify the css and acquire a reference. The root
		 * is provided by the caller, so we know it's alive
		 * and kicking, and don't take an extra reference.
		 */
		if (css == &root->css || css_tryget(css))
			break;
	}

	next = mem_cgroup_from_css(css);

	if (reclaim) {
		/*
		 * The position could have already been updated by a competing
		 * thread, so check that the value hasn't changed since we read
		 * it to avoid reclaiming from the same cgroup twice.
		 */
		if (cmpxchg(&iter->position, pos, next) != pos) {
			if (css && css != &root->css)
				css_put(css);
			goto restart;
		}

		if (!next) {
			atomic_inc(&iter->generation);

			/*
			 * Reclaimers share the hierarchy walk, and a
			 * new one might jump in right at the end of
			 * the hierarchy - make sure they see at least
			 * one group and restart from the beginning.
			 */
			if (!prev)
				goto restart;
		}
	}

out_unlock:
	rcu_read_unlock();
	if (prev && prev != root)
		css_put(&prev->css);

	return next;
}

/**
 * mem_cgroup_iter_break - abort a hierarchy walk prematurely
 * @root: hierarchy root
 * @prev: last visited hierarchy member as returned by mem_cgroup_iter()
 */
void mem_cgroup_iter_break(struct mem_cgroup *root,
			   struct mem_cgroup *prev)
{
	if (!root)
		root = root_mem_cgroup;
	if (prev && prev != root)
		css_put(&prev->css);
}

static void __invalidate_reclaim_iterators(struct mem_cgroup *from,
					   struct mem_cgroup *dead_memcg)
{
	struct mem_cgroup_reclaim_iter *iter;
	struct mem_cgroup_per_node *mz;
	int nid;

	for_each_node(nid) {
		mz = from->nodeinfo[nid];
		iter = &mz->iter;
		cmpxchg(&iter->position, dead_memcg, NULL);
	}
}

static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
{
	struct mem_cgroup *memcg = dead_memcg;
	struct mem_cgroup *last;

	do {
		__invalidate_reclaim_iterators(memcg, dead_memcg);
		last = memcg;
	} while ((memcg = parent_mem_cgroup(memcg)));

	/*
	 * When cgroup1 non-hierarchy mode is used,
	 * parent_mem_cgroup() does not walk all the way up to the
	 * cgroup root (root_mem_cgroup). So we have to handle
	 * dead_memcg from cgroup root separately.
	 */
	if (!mem_cgroup_is_root(last))
		__invalidate_reclaim_iterators(root_mem_cgroup,
					       dead_memcg);
}

/**
 * mem_cgroup_scan_tasks - iterate over tasks of a memory cgroup hierarchy
 * @memcg: hierarchy root
 * @fn: function to call for each task
 * @arg: argument passed to @fn
 *
 * This function iterates over tasks attached to @memcg or to any of its
 * descendants and calls @fn for each task. If @fn returns a non-zero
 * value, the function breaks the iteration loop. Otherwise, it will iterate
 * over all tasks and return 0.
 *
 * This function must not be called for the root memory cgroup.
 */
void mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
			   int (*fn)(struct task_struct *, void *), void *arg)
{
	struct mem_cgroup *iter;
	int ret = 0;

	BUG_ON(mem_cgroup_is_root(memcg));

	for_each_mem_cgroup_tree(iter, memcg) {
		struct css_task_iter it;
		struct task_struct *task;

		css_task_iter_start(&iter->css, CSS_TASK_ITER_PROCS, &it);
		while (!ret && (task = css_task_iter_next(&it))) {
			ret = fn(task, arg);
			/* Avoid potential softlockup warning */
			cond_resched();
		}
		css_task_iter_end(&it);
		if (ret) {
			mem_cgroup_iter_break(memcg, iter);
			break;
		}
	}
}

#ifdef CONFIG_DEBUG_VM
void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return;

	memcg = folio_memcg(folio);

	if (!memcg)
		VM_BUG_ON_FOLIO(!mem_cgroup_is_root(lruvec_memcg(lruvec)), folio);
	else
		VM_BUG_ON_FOLIO(lruvec_memcg(lruvec) != memcg, folio);
}
#endif

/**
 * folio_lruvec_lock - Lock the lruvec for a folio.
 * @folio: Pointer to the folio.
 *
 * These functions are safe to use under any of the following conditions:
 * - folio locked
 * - folio_test_lru false
 * - folio frozen (refcount of 0)
 *
 * Return: The lruvec this folio is on with its lock held.
 */
struct lruvec *folio_lruvec_lock(struct folio *folio)
{
	struct lruvec *lruvec = folio_lruvec(folio);

	spin_lock(&lruvec->lru_lock);
	lruvec_memcg_debug(lruvec, folio);

	return lruvec;
}

/**
 * folio_lruvec_lock_irq - Lock the lruvec for a folio.
 * @folio: Pointer to the folio.
 *
 * These functions are safe to use under any of the following conditions:
 * - folio locked
 * - folio_test_lru false
 * - folio frozen (refcount of 0)
 *
 * Return: The lruvec this folio is on with its lock held and interrupts
 * disabled.
 */
struct lruvec *folio_lruvec_lock_irq(struct folio *folio)
{
	struct lruvec *lruvec = folio_lruvec(folio);

	spin_lock_irq(&lruvec->lru_lock);
	lruvec_memcg_debug(lruvec, folio);

	return lruvec;
}

/**
 * folio_lruvec_lock_irqsave - Lock the lruvec for a folio.
 * @folio: Pointer to the folio.
 * @flags: Pointer to irqsave flags.
 *
 * These functions are safe to use under any of the following conditions:
 * - folio locked
 * - folio_test_lru false
 * - folio frozen (refcount of 0)
 *
 * Return: The lruvec this folio is on with its lock held and interrupts
 * disabled.
 */
struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio,
					 unsigned long *flags)
{
	struct lruvec *lruvec = folio_lruvec(folio);

	spin_lock_irqsave(&lruvec->lru_lock, *flags);
	lruvec_memcg_debug(lruvec, folio);

	return lruvec;
}
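
/*
 * A minimal usage sketch for the lruvec locking helpers above (hypothetical
 * caller, not taken from this file). The lock taken is lruvec->lru_lock, so
 * the plain spinlock unlock variants pair with it:
 *
 *	unsigned long flags;
 *	struct lruvec *lruvec;
 *
 *	folio_lock(folio);			(one of the safety conditions)
 *	lruvec = folio_lruvec_lock_irqsave(folio, &flags);
 *	... manipulate the folio's LRU state ...
 *	spin_unlock_irqrestore(&lruvec->lru_lock, flags);
 *	folio_unlock(folio);
 */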

/**
 * mem_cgroup_update_lru_size - account for adding or removing an lru page
 * @lruvec: mem_cgroup per zone lru vector
 * @lru: index of lru list the page is sitting on
 * @zid: zone id of the accounted pages
 * @nr_pages: positive when adding or negative when removing
 *
 * This function must be called under lru_lock, just before a page is added
 * to or just after a page is removed from an lru list.
 */
void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
				int zid, int nr_pages)
{
	struct mem_cgroup_per_node *mz;
	unsigned long *lru_size;
	long size;

	if (mem_cgroup_disabled())
		return;

	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	lru_size = &mz->lru_zone_size[zid][lru];

	if (nr_pages < 0)
		*lru_size += nr_pages;

	size = *lru_size;
	if (WARN_ONCE(size < 0,
		"%s(%p, %d, %d): lru_size %ld\n",
		__func__, lruvec, lru, nr_pages, size)) {
		VM_BUG_ON(1);
		*lru_size = 0;
	}

	if (nr_pages > 0)
		*lru_size += nr_pages;
}

/**
 * mem_cgroup_margin - calculate chargeable space of a memory cgroup
 * @memcg: the memory cgroup
 *
 * Returns the maximum amount of memory @memcg can be charged with, in
 * pages.
 */
static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
{
	unsigned long margin = 0;
	unsigned long count;
	unsigned long limit;

	count = page_counter_read(&memcg->memory);
	limit = READ_ONCE(memcg->memory.max);
	if (count < limit)
		margin = limit - count;

	if (do_memsw_account()) {
		count = page_counter_read(&memcg->memsw);
		limit = READ_ONCE(memcg->memsw.max);
		if (count < limit)
			margin = min(margin, limit - count);
		else
			margin = 0;
	}

	return margin;
}

struct memory_stat {
	const char *name;
	unsigned int idx;
};

static const struct memory_stat memory_stats[] = {
	{ "anon",			NR_ANON_MAPPED },
	{ "file",			NR_FILE_PAGES },
	{ "kernel",			MEMCG_KMEM },
	{ "kernel_stack",		NR_KERNEL_STACK_KB },
	{ "pagetables",			NR_PAGETABLE },
	{ "sec_pagetables",		NR_SECONDARY_PAGETABLE },
	{ "percpu",			MEMCG_PERCPU_B },
	{ "sock",			MEMCG_SOCK },
	{ "vmalloc",			MEMCG_VMALLOC },
	{ "shmem",			NR_SHMEM },
#ifdef CONFIG_ZSWAP
	{ "zswap",			MEMCG_ZSWAP_B },
	{ "zswapped",			MEMCG_ZSWAPPED },
#endif
	{ "file_mapped",		NR_FILE_MAPPED },
	{ "file_dirty",			NR_FILE_DIRTY },
	{ "file_writeback",		NR_WRITEBACK },
#ifdef CONFIG_SWAP
	{ "swapcached",			NR_SWAPCACHE },
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	{ "anon_thp",			NR_ANON_THPS },
	{ "file_thp",			NR_FILE_THPS },
	{ "shmem_thp",			NR_SHMEM_THPS },
#endif
	{ "inactive_anon",		NR_INACTIVE_ANON },
	{ "active_anon",		NR_ACTIVE_ANON },
	{ "inactive_file",		NR_INACTIVE_FILE },
	{ "active_file",		NR_ACTIVE_FILE },
	{ "unevictable",		NR_UNEVICTABLE },
	{ "slab_reclaimable",		NR_SLAB_RECLAIMABLE_B },
	{ "slab_unreclaimable",		NR_SLAB_UNRECLAIMABLE_B },
#ifdef CONFIG_HUGETLB_PAGE
	{ "hugetlb",			NR_HUGETLB },
#endif

	/* The memory events */
	{ "workingset_refault_anon",	WORKINGSET_REFAULT_ANON },
	{ "workingset_refault_file",	WORKINGSET_REFAULT_FILE },
	{ "workingset_activate_anon",	WORKINGSET_ACTIVATE_ANON },
	{ "workingset_activate_file",	WORKINGSET_ACTIVATE_FILE },
	{ "workingset_restore_anon",	WORKINGSET_RESTORE_ANON },
	{ "workingset_restore_file",	WORKINGSET_RESTORE_FILE },
	{ "workingset_nodereclaim",	WORKINGSET_NODERECLAIM },

	{ "pgdemote_kswapd",		PGDEMOTE_KSWAPD },
	{ "pgdemote_direct",		PGDEMOTE_DIRECT },
	{ "pgdemote_khugepaged",	PGDEMOTE_KHUGEPAGED },
	{ "pgdemote_proactive",		PGDEMOTE_PROACTIVE },
#ifdef CONFIG_NUMA_BALANCING
	{ "pgpromote_success",		PGPROMOTE_SUCCESS },
#endif
};

/* The actual unit of the state item, not the same as the output unit */
static int memcg_page_state_unit(int item)
{
	switch (item) {
	case MEMCG_PERCPU_B:
	case MEMCG_ZSWAP_B:
	case NR_SLAB_RECLAIMABLE_B:
	case NR_SLAB_UNRECLAIMABLE_B:
		return 1;
	case NR_KERNEL_STACK_KB:
		return SZ_1K;
	default:
		return PAGE_SIZE;
	}
}

/* Translate stat items to the correct unit for memory.stat output */
static int memcg_page_state_output_unit(int item)
{
	/*
	 * Workingset state is actually in pages, but we export it to userspace
	 * as a scalar count of events, so special case it here.
	 *
	 * Demotion and promotion activities are exported in pages, consistent
	 * with their global counterparts.
	 */
	switch (item) {
	case WORKINGSET_REFAULT_ANON:
	case WORKINGSET_REFAULT_FILE:
	case WORKINGSET_ACTIVATE_ANON:
	case WORKINGSET_ACTIVATE_FILE:
	case WORKINGSET_RESTORE_ANON:
	case WORKINGSET_RESTORE_FILE:
	case WORKINGSET_NODERECLAIM:
	case PGDEMOTE_KSWAPD:
	case PGDEMOTE_DIRECT:
	case PGDEMOTE_KHUGEPAGED:
	case PGDEMOTE_PROACTIVE:
#ifdef CONFIG_NUMA_BALANCING
	case PGPROMOTE_SUCCESS:
#endif
		return 1;
	default:
		return memcg_page_state_unit(item);
	}
}

unsigned long memcg_page_state_output(struct mem_cgroup *memcg, int item)
{
	return memcg_page_state(memcg, item) *
		memcg_page_state_output_unit(item);
}

#ifdef CONFIG_MEMCG_V1
unsigned long memcg_page_state_local_output(struct mem_cgroup *memcg, int item)
{
	return memcg_page_state_local(memcg, item) *
		memcg_page_state_output_unit(item);
}
#endif

#ifdef CONFIG_HUGETLB_PAGE
static bool memcg_accounts_hugetlb(void)
{
	return cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_HUGETLB_ACCOUNTING;
}
#else /* CONFIG_HUGETLB_PAGE */
static bool memcg_accounts_hugetlb(void)
{
	return false;
}
#endif /* CONFIG_HUGETLB_PAGE */

static void memcg_stat_format(struct mem_cgroup *memcg, struct seq_buf *s)
{
	int i;

	/*
	 * Provide statistics on the state of the memory subsystem as
	 * well as cumulative event counters that show past behavior.
	 *
	 * This list is ordered following a combination of these gradients:
	 * 1) generic big picture -> specifics and details
	 * 2) reflecting userspace activity -> reflecting kernel heuristics
	 *
	 * Current memory state:
	 */
	mem_cgroup_flush_stats(memcg);

	for (i = 0; i < ARRAY_SIZE(memory_stats); i++) {
		u64 size;

#ifdef CONFIG_HUGETLB_PAGE
		if (unlikely(memory_stats[i].idx == NR_HUGETLB) &&
		    !memcg_accounts_hugetlb())
			continue;
#endif
		size = memcg_page_state_output(memcg, memory_stats[i].idx);
		seq_buf_printf(s, "%s %llu\n", memory_stats[i].name, size);

		if (unlikely(memory_stats[i].idx == NR_SLAB_UNRECLAIMABLE_B)) {
			size += memcg_page_state_output(memcg,
							NR_SLAB_RECLAIMABLE_B);
			seq_buf_printf(s, "slab %llu\n", size);
		}
	}

	/* Accumulated memory events */
	seq_buf_printf(s, "pgscan %lu\n",
		       memcg_events(memcg, PGSCAN_KSWAPD) +
		       memcg_events(memcg, PGSCAN_DIRECT) +
		       memcg_events(memcg, PGSCAN_PROACTIVE) +
		       memcg_events(memcg, PGSCAN_KHUGEPAGED));
	seq_buf_printf(s, "pgsteal %lu\n",
		       memcg_events(memcg, PGSTEAL_KSWAPD) +
		       memcg_events(memcg, PGSTEAL_DIRECT) +
		       memcg_events(memcg, PGSTEAL_PROACTIVE) +
		       memcg_events(memcg, PGSTEAL_KHUGEPAGED));

	for (i = 0; i < ARRAY_SIZE(memcg_vm_event_stat); i++) {
#ifdef CONFIG_MEMCG_V1
		if (memcg_vm_event_stat[i] == PGPGIN ||
		    memcg_vm_event_stat[i] == PGPGOUT)
			continue;
#endif
		seq_buf_printf(s, "%s %lu\n",
			       vm_event_name(memcg_vm_event_stat[i]),
			       memcg_events(memcg, memcg_vm_event_stat[i]));
	}
}

static void memory_stat_format(struct mem_cgroup *memcg, struct seq_buf *s)
{
	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
		memcg_stat_format(memcg, s);
	else
		memcg1_stat_format(memcg, s);
	if (seq_buf_has_overflowed(s))
		pr_warn("%s: Warning, stat buffer overflow, please report\n", __func__);
}

/**
 * mem_cgroup_print_oom_context: Print OOM information relevant to
 * memory controller.
 * @memcg: The memory cgroup that went over limit
 * @p: Task that is going to be killed
 *
 * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
 * enabled
 */
void mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
{
	rcu_read_lock();

	if (memcg) {
		pr_cont(",oom_memcg=");
		pr_cont_cgroup_path(memcg->css.cgroup);
	} else
		pr_cont(",global_oom");
	if (p) {
		pr_cont(",task_memcg=");
		pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
	}
	rcu_read_unlock();
}

/**
 * mem_cgroup_print_oom_meminfo: Print OOM memory information relevant to
 * memory controller.
 * @memcg: The memory cgroup that went over limit
 */
void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
{
	/* Use a static buffer, as the caller is holding oom_lock. */
	static char buf[SEQ_BUF_SIZE];
	struct seq_buf s;
	unsigned long memory_failcnt;

	lockdep_assert_held(&oom_lock);

	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
		memory_failcnt = atomic_long_read(&memcg->memory_events[MEMCG_MAX]);
	else
		memory_failcnt = memcg->memory.failcnt;

	pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n",
		K((u64)page_counter_read(&memcg->memory)),
		K((u64)READ_ONCE(memcg->memory.max)), memory_failcnt);
	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
		pr_info("swap: usage %llukB, limit %llukB, failcnt %lu\n",
			K((u64)page_counter_read(&memcg->swap)),
			K((u64)READ_ONCE(memcg->swap.max)),
			atomic_long_read(&memcg->memory_events[MEMCG_SWAP_MAX]));
#ifdef CONFIG_MEMCG_V1
	else {
		pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %lu\n",
			K((u64)page_counter_read(&memcg->memsw)),
			K((u64)memcg->memsw.max), memcg->memsw.failcnt);
		pr_info("kmem: usage %llukB, limit %llukB, failcnt %lu\n",
			K((u64)page_counter_read(&memcg->kmem)),
			K((u64)memcg->kmem.max), memcg->kmem.failcnt);
	}
#endif

	pr_info("Memory cgroup stats for ");
	pr_cont_cgroup_path(memcg->css.cgroup);
	pr_cont(":");
	seq_buf_init(&s, buf, SEQ_BUF_SIZE);
	memory_stat_format(memcg, &s);
	seq_buf_do_printk(&s, KERN_INFO);
}

/*
 * Return the memory (and swap, if configured) limit for a memcg.
 */
unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
{
	unsigned long max = READ_ONCE(memcg->memory.max);

	if (do_memsw_account()) {
		if (mem_cgroup_swappiness(memcg)) {
			/* Calculate swap excess capacity from memsw limit */
			unsigned long swap = READ_ONCE(memcg->memsw.max) - max;

			max += min(swap, (unsigned long)total_swap_pages);
		}
	} else {
		if (mem_cgroup_swappiness(memcg))
			max += min(READ_ONCE(memcg->swap.max),
				   (unsigned long)total_swap_pages);
	}
	return max;
}

unsigned long mem_cgroup_size(struct mem_cgroup *memcg)
{
	return page_counter_read(&memcg->memory);
}

void __memcg_memory_event(struct mem_cgroup *memcg,
			  enum memcg_memory_event event, bool allow_spinning)
{
	bool swap_event = event == MEMCG_SWAP_HIGH || event == MEMCG_SWAP_MAX ||
			  event == MEMCG_SWAP_FAIL;

	/* For now only MEMCG_MAX can happen with !allow_spinning context. */
	VM_WARN_ON_ONCE(!allow_spinning && event != MEMCG_MAX);

	atomic_long_inc(&memcg->memory_events_local[event]);
	if (!swap_event && allow_spinning)
		cgroup_file_notify(&memcg->events_local_file);

	do {
		atomic_long_inc(&memcg->memory_events[event]);
		if (allow_spinning) {
			if (swap_event)
				cgroup_file_notify(&memcg->swap_events_file);
			else
				cgroup_file_notify(&memcg->events_file);
		}

		if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
			break;
		if (cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_LOCAL_EVENTS)
			break;
	} while ((memcg = parent_mem_cgroup(memcg)) &&
		 !mem_cgroup_is_root(memcg));
}
EXPORT_SYMBOL_GPL(__memcg_memory_event);

static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
				     int order)
{
	struct oom_control oc = {
		.zonelist = NULL,
		.nodemask = NULL,
		.memcg = memcg,
		.gfp_mask = gfp_mask,
		.order = order,
	};
	bool ret = true;

	if (mutex_lock_killable(&oom_lock))
		return true;

	if (mem_cgroup_margin(memcg) >= (1 << order))
		goto unlock;

	/*
	 * A few threads which were not waiting at mutex_lock_killable() can
	 * fail to bail out. Therefore, check again after holding oom_lock.
	 */
	ret = out_of_memory(&oc);

unlock:
	mutex_unlock(&oom_lock);
	return ret;
}

/*
 * Returns true if successfully killed one or more processes. Though in some
 * corner cases it can return true even without killing any process.
 */
static bool mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
{
	bool locked, ret;

	if (order > PAGE_ALLOC_COSTLY_ORDER)
		return false;

	memcg_memory_event(memcg, MEMCG_OOM);

	if (!memcg1_oom_prepare(memcg, &locked))
		return false;

	ret = mem_cgroup_out_of_memory(memcg, mask, order);

	memcg1_oom_finish(memcg, locked);

	return ret;
}

/**
 * mem_cgroup_get_oom_group - get a memory cgroup to clean up after OOM
 * @victim: task to be killed by the OOM killer
 * @oom_domain: memcg in case of memcg OOM, NULL in case of system-wide OOM
 *
 * Returns a pointer to a memory cgroup, which has to be cleaned up
 * by killing all belonging OOM-killable tasks.
 *
 * Caller has to call mem_cgroup_put() on the returned non-NULL memcg.
 */
struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim,
					    struct mem_cgroup *oom_domain)
{
	struct mem_cgroup *oom_group = NULL;
	struct mem_cgroup *memcg;

	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
		return NULL;

	if (!oom_domain)
		oom_domain = root_mem_cgroup;

	rcu_read_lock();

	memcg = mem_cgroup_from_task(victim);
	if (mem_cgroup_is_root(memcg))
		goto out;

	/*
	 * If the victim task has been asynchronously moved to a different
	 * memory cgroup, we might end up killing tasks outside oom_domain.
	 * In this case it's better to ignore memory.group.oom.
	 */
	if (unlikely(!mem_cgroup_is_descendant(memcg, oom_domain)))
		goto out;

	/*
	 * Traverse the memory cgroup hierarchy from the victim task's
	 * cgroup up to the OOMing cgroup (or root) to find the
	 * highest-level memory cgroup with oom.group set.
	 */
	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
		if (READ_ONCE(memcg->oom_group))
			oom_group = memcg;

		if (memcg == oom_domain)
			break;
	}

	if (oom_group)
		css_get(&oom_group->css);
out:
	rcu_read_unlock();

	return oom_group;
}

void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
{
	pr_info("Tasks in ");
	pr_cont_cgroup_path(memcg->css.cgroup);
	pr_cont(" are going to be killed due to memory.oom.group set\n");
}

/*
 * The value of NR_MEMCG_STOCK is selected to keep the cached memcgs and their
 * nr_pages in a single cacheline. This may change in future.
 */
#define NR_MEMCG_STOCK 7
#define FLUSHING_CACHED_CHARGE 0
struct memcg_stock_pcp {
	local_trylock_t lock;
	uint8_t nr_pages[NR_MEMCG_STOCK];
	struct mem_cgroup *cached[NR_MEMCG_STOCK];

	struct work_struct work;
	unsigned long flags;
};

static DEFINE_PER_CPU_ALIGNED(struct memcg_stock_pcp, memcg_stock) = {
	.lock = INIT_LOCAL_TRYLOCK(lock),
};

struct obj_stock_pcp {
	local_trylock_t lock;
	unsigned int nr_bytes;
	struct obj_cgroup *cached_objcg;
	struct pglist_data *cached_pgdat;
	int nr_slab_reclaimable_b;
	int nr_slab_unreclaimable_b;

	struct work_struct work;
	unsigned long flags;
};

static DEFINE_PER_CPU_ALIGNED(struct obj_stock_pcp, obj_stock) = {
	.lock = INIT_LOCAL_TRYLOCK(lock),
};

static DEFINE_MUTEX(percpu_charge_mutex);

static void drain_obj_stock(struct obj_stock_pcp *stock);
static bool obj_stock_flush_required(struct obj_stock_pcp *stock,
				     struct mem_cgroup *root_memcg);

/**
 * consume_stock: Try to consume stocked charge on this cpu.
 * @memcg: memcg to consume from.
 * @nr_pages: how many pages to charge.
 *
 * Consume the cached charge if enough nr_pages are present, otherwise return
 * failure. Also return failure for a charge request larger than
 * MEMCG_CHARGE_BATCH or if the local lock is already taken.
 *
 * Returns true if successful, false otherwise.
 */
static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
{
	struct memcg_stock_pcp *stock;
	uint8_t stock_pages;
	bool ret = false;
	int i;

	if (nr_pages > MEMCG_CHARGE_BATCH ||
	    !local_trylock(&memcg_stock.lock))
		return ret;

	stock = this_cpu_ptr(&memcg_stock);

	for (i = 0; i < NR_MEMCG_STOCK; ++i) {
		if (memcg != READ_ONCE(stock->cached[i]))
			continue;

		stock_pages = READ_ONCE(stock->nr_pages[i]);
		if (stock_pages >= nr_pages) {
			WRITE_ONCE(stock->nr_pages[i], stock_pages - nr_pages);
			ret = true;
		}
		break;
	}

	local_unlock(&memcg_stock.lock);

	return ret;
}

static void memcg_uncharge(struct mem_cgroup *memcg, unsigned int nr_pages)
{
	page_counter_uncharge(&memcg->memory, nr_pages);
	if (do_memsw_account())
		page_counter_uncharge(&memcg->memsw, nr_pages);
}

/*
 * Uncharge the stock cached in the given slot and reset the cached information.
 */
static void drain_stock(struct memcg_stock_pcp *stock, int i)
{
	struct mem_cgroup *old = READ_ONCE(stock->cached[i]);
	uint8_t stock_pages;

	if (!old)
		return;

	stock_pages = READ_ONCE(stock->nr_pages[i]);
	if (stock_pages) {
		memcg_uncharge(old, stock_pages);
		WRITE_ONCE(stock->nr_pages[i], 0);
	}

	css_put(&old->css);
	WRITE_ONCE(stock->cached[i], NULL);
}

static void drain_stock_fully(struct memcg_stock_pcp *stock)
{
	int i;

	for (i = 0; i < NR_MEMCG_STOCK; ++i)
		drain_stock(stock, i);
}

static void drain_local_memcg_stock(struct work_struct *dummy)
{
	struct memcg_stock_pcp *stock;

	if (WARN_ONCE(!in_task(), "drain in non-task context"))
		return;

	local_lock(&memcg_stock.lock);

	stock = this_cpu_ptr(&memcg_stock);
	drain_stock_fully(stock);
	clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);

	local_unlock(&memcg_stock.lock);
}

static void drain_local_obj_stock(struct work_struct *dummy)
{
	struct obj_stock_pcp *stock;

	if (WARN_ONCE(!in_task(), "drain in non-task context"))
		return;

	local_lock(&obj_stock.lock);

	stock = this_cpu_ptr(&obj_stock);
	drain_obj_stock(stock);
	clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);

	local_unlock(&obj_stock.lock);
}

static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
{
	struct memcg_stock_pcp *stock;
	struct mem_cgroup *cached;
	uint8_t stock_pages;
	bool success = false;
	int empty_slot = -1;
	int i;

	/*
	 * For now, limit MEMCG_CHARGE_BATCH to 127 or less. If in the future
	 * we decide to increase it beyond 127, we will need more careful
	 * handling of nr_pages[] in struct memcg_stock_pcp.
	 */
	BUILD_BUG_ON(MEMCG_CHARGE_BATCH > S8_MAX);

	VM_WARN_ON_ONCE(mem_cgroup_is_root(memcg));

	if (nr_pages > MEMCG_CHARGE_BATCH ||
	    !local_trylock(&memcg_stock.lock)) {
		/*
		 * In case of larger than batch refill or unlikely failure to
		 * lock the percpu memcg_stock.lock, uncharge memcg directly.
		 */
		memcg_uncharge(memcg, nr_pages);
		return;
	}

	stock = this_cpu_ptr(&memcg_stock);
	for (i = 0; i < NR_MEMCG_STOCK; ++i) {
		cached = READ_ONCE(stock->cached[i]);
		if (!cached && empty_slot == -1)
			empty_slot = i;
		if (memcg == READ_ONCE(stock->cached[i])) {
			stock_pages = READ_ONCE(stock->nr_pages[i]) + nr_pages;
			WRITE_ONCE(stock->nr_pages[i], stock_pages);
			if (stock_pages > MEMCG_CHARGE_BATCH)
				drain_stock(stock, i);
			success = true;
			break;
		}
	}

	if (!success) {
		i = empty_slot;
		if (i == -1) {
			i = get_random_u32_below(NR_MEMCG_STOCK);
			drain_stock(stock, i);
		}
		css_get(&memcg->css);
		WRITE_ONCE(stock->cached[i], memcg);
		WRITE_ONCE(stock->nr_pages[i], nr_pages);
	}

	local_unlock(&memcg_stock.lock);
}

static bool is_memcg_drain_needed(struct memcg_stock_pcp *stock,
				  struct mem_cgroup *root_memcg)
{
	struct mem_cgroup *memcg;
	bool flush = false;
	int i;

	rcu_read_lock();
	for (i = 0; i < NR_MEMCG_STOCK; ++i) {
		memcg = READ_ONCE(stock->cached[i]);
		if (!memcg)
			continue;

		if (READ_ONCE(stock->nr_pages[i]) &&
		    mem_cgroup_is_descendant(memcg, root_memcg)) {
			flush = true;
			break;
		}
	}
	rcu_read_unlock();
	return flush;
}

/*
 * Drain all per-CPU charge caches for the given root_memcg and the subtree
 * of the hierarchy under it.
 */
void drain_all_stock(struct mem_cgroup *root_memcg)
{
	int cpu, curcpu;

	/* If someone's already draining, avoid running more workers. */
	if (!mutex_trylock(&percpu_charge_mutex))
		return;
	/*
	 * Notify other cpus that a system-wide "drain" is running.
	 * We do not care about races with cpu hotplug because cpu down
	 * as well as workers from this path always operate on the local
	 * per-cpu data. CPU up doesn't touch memcg_stock at all.
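	 * Isolated CPUs are not sent drain work; their cached charges are
	 * simply left in place.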
2022 */ 2023 migrate_disable(); 2024 curcpu = smp_processor_id(); 2025 for_each_online_cpu(cpu) { 2026 struct memcg_stock_pcp *memcg_st = &per_cpu(memcg_stock, cpu); 2027 struct obj_stock_pcp *obj_st = &per_cpu(obj_stock, cpu); 2028 2029 if (!test_bit(FLUSHING_CACHED_CHARGE, &memcg_st->flags) && 2030 is_memcg_drain_needed(memcg_st, root_memcg) && 2031 !test_and_set_bit(FLUSHING_CACHED_CHARGE, 2032 &memcg_st->flags)) { 2033 if (cpu == curcpu) 2034 drain_local_memcg_stock(&memcg_st->work); 2035 else if (!cpu_is_isolated(cpu)) 2036 schedule_work_on(cpu, &memcg_st->work); 2037 } 2038 2039 if (!test_bit(FLUSHING_CACHED_CHARGE, &obj_st->flags) && 2040 obj_stock_flush_required(obj_st, root_memcg) && 2041 !test_and_set_bit(FLUSHING_CACHED_CHARGE, 2042 &obj_st->flags)) { 2043 if (cpu == curcpu) 2044 drain_local_obj_stock(&obj_st->work); 2045 else if (!cpu_is_isolated(cpu)) 2046 schedule_work_on(cpu, &obj_st->work); 2047 } 2048 } 2049 migrate_enable(); 2050 mutex_unlock(&percpu_charge_mutex); 2051 } 2052 2053 static int memcg_hotplug_cpu_dead(unsigned int cpu) 2054 { 2055 /* no need for the local lock */ 2056 drain_obj_stock(&per_cpu(obj_stock, cpu)); 2057 drain_stock_fully(&per_cpu(memcg_stock, cpu)); 2058 2059 return 0; 2060 } 2061 2062 static unsigned long reclaim_high(struct mem_cgroup *memcg, 2063 unsigned int nr_pages, 2064 gfp_t gfp_mask) 2065 { 2066 unsigned long nr_reclaimed = 0; 2067 2068 do { 2069 unsigned long pflags; 2070 2071 if (page_counter_read(&memcg->memory) <= 2072 READ_ONCE(memcg->memory.high)) 2073 continue; 2074 2075 memcg_memory_event(memcg, MEMCG_HIGH); 2076 2077 psi_memstall_enter(&pflags); 2078 nr_reclaimed += try_to_free_mem_cgroup_pages(memcg, nr_pages, 2079 gfp_mask, 2080 MEMCG_RECLAIM_MAY_SWAP, 2081 NULL); 2082 psi_memstall_leave(&pflags); 2083 } while ((memcg = parent_mem_cgroup(memcg)) && 2084 !mem_cgroup_is_root(memcg)); 2085 2086 return nr_reclaimed; 2087 } 2088 2089 static void high_work_func(struct work_struct *work) 2090 { 2091 struct mem_cgroup *memcg; 2092 2093 memcg = container_of(work, struct mem_cgroup, high_work); 2094 reclaim_high(memcg, MEMCG_CHARGE_BATCH, GFP_KERNEL); 2095 } 2096 2097 /* 2098 * Clamp the maximum sleep time per allocation batch to 2 seconds. This is 2099 * enough to still cause a significant slowdown in most cases, while still 2100 * allowing diagnostics and tracing to proceed without becoming stuck. 2101 */ 2102 #define MEMCG_MAX_HIGH_DELAY_JIFFIES (2UL*HZ) 2103 2104 /* 2105 * When calculating the delay, we use these either side of the exponentiation to 2106 * maintain precision and scale to a reasonable number of jiffies (see the table 2107 * below. 2108 * 2109 * - MEMCG_DELAY_PRECISION_SHIFT: Extra precision bits while translating the 2110 * overage ratio to a delay. 2111 * - MEMCG_DELAY_SCALING_SHIFT: The number of bits to scale down the 2112 * proposed penalty in order to reduce to a reasonable number of jiffies, and 2113 * to produce a reasonable delay curve. 2114 * 2115 * MEMCG_DELAY_SCALING_SHIFT just happens to be a number that produces a 2116 * reasonable delay curve compared to precision-adjusted overage, not 2117 * penalising heavily at first, but still making sure that growth beyond the 2118 * limit penalises misbehaviour cgroups by slowing them down exponentially. 
 * For example, with a high of 100 megabytes:
 *
 *  +-------+------------------------+
 *  | usage | time to allocate in ms |
 *  +-------+------------------------+
 *  | 100M  |                      0 |
 *  | 101M  |                      6 |
 *  | 102M  |                     25 |
 *  | 103M  |                     57 |
 *  | 104M  |                    102 |
 *  | 105M  |                    159 |
 *  | 106M  |                    230 |
 *  | 107M  |                    313 |
 *  | 108M  |                    409 |
 *  | 109M  |                    518 |
 *  | 110M  |                    639 |
 *  | 111M  |                    774 |
 *  | 112M  |                    921 |
 *  | 113M  |                   1081 |
 *  | 114M  |                   1254 |
 *  | 115M  |                   1439 |
 *  | 116M  |                   1638 |
 *  | 117M  |                   1849 |
 *  | 118M  |                   2000 |
 *  | 119M  |                   2000 |
 *  | 120M  |                   2000 |
 *  +-------+------------------------+
 */
#define MEMCG_DELAY_PRECISION_SHIFT	20
#define MEMCG_DELAY_SCALING_SHIFT	14

static u64 calculate_overage(unsigned long usage, unsigned long high)
{
	u64 overage;

	if (usage <= high)
		return 0;

	/*
	 * Prevent division by 0 in overage calculation by acting as if
	 * it was a threshold of 1 page.
	 */
	high = max(high, 1UL);

	overage = usage - high;
	overage <<= MEMCG_DELAY_PRECISION_SHIFT;
	return div64_u64(overage, high);
}

static u64 mem_find_max_overage(struct mem_cgroup *memcg)
{
	u64 overage, max_overage = 0;

	do {
		overage = calculate_overage(page_counter_read(&memcg->memory),
					    READ_ONCE(memcg->memory.high));
		max_overage = max(overage, max_overage);
	} while ((memcg = parent_mem_cgroup(memcg)) &&
		 !mem_cgroup_is_root(memcg));

	return max_overage;
}

static u64 swap_find_max_overage(struct mem_cgroup *memcg)
{
	u64 overage, max_overage = 0;

	do {
		overage = calculate_overage(page_counter_read(&memcg->swap),
					    READ_ONCE(memcg->swap.high));
		if (overage)
			memcg_memory_event(memcg, MEMCG_SWAP_HIGH);
		max_overage = max(overage, max_overage);
	} while ((memcg = parent_mem_cgroup(memcg)) &&
		 !mem_cgroup_is_root(memcg));

	return max_overage;
}

/*
 * Get the number of jiffies that we should penalise a mischievous cgroup which
 * is exceeding its memory.high by checking both it and its ancestors.
 */
static unsigned long calculate_high_delay(struct mem_cgroup *memcg,
					  unsigned int nr_pages,
					  u64 max_overage)
{
	unsigned long penalty_jiffies;

	if (!max_overage)
		return 0;

	/*
	 * We use overage compared to memory.high to calculate the number of
	 * jiffies to sleep (penalty_jiffies). Ideally this value should be
	 * fairly lenient on small overages, and increasingly harsh when the
	 * memcg in question makes it clear that it has no intention of stopping
	 * its crazy behaviour, so we exponentially increase the delay based on
	 * overage amount.
	 */
	penalty_jiffies = max_overage * max_overage * HZ;
	penalty_jiffies >>= MEMCG_DELAY_PRECISION_SHIFT;
	penalty_jiffies >>= MEMCG_DELAY_SCALING_SHIFT;

	/*
	 * Factor in the task's own contribution to the overage, such that four
	 * N-sized allocations are throttled approximately the same as one
	 * 4N-sized allocation.
	 *
	 * MEMCG_CHARGE_BATCH pages is nominal, so work out how much smaller or
	 * larger the current charge batch is than that.
	 */
	return penalty_jiffies * nr_pages / MEMCG_CHARGE_BATCH;
}

/*
 * Reclaims memory over the high limit.
Called directly from 2236 * try_charge() (context permitting), as well as from the userland 2237 * return path where reclaim is always able to block. 2238 */ 2239 void __mem_cgroup_handle_over_high(gfp_t gfp_mask) 2240 { 2241 unsigned long penalty_jiffies; 2242 unsigned long pflags; 2243 unsigned long nr_reclaimed; 2244 unsigned int nr_pages = current->memcg_nr_pages_over_high; 2245 int nr_retries = MAX_RECLAIM_RETRIES; 2246 struct mem_cgroup *memcg; 2247 bool in_retry = false; 2248 2249 memcg = get_mem_cgroup_from_mm(current->mm); 2250 current->memcg_nr_pages_over_high = 0; 2251 2252 retry_reclaim: 2253 /* 2254 * Bail if the task is already exiting. Unlike memory.max, 2255 * memory.high enforcement isn't as strict, and there is no 2256 * OOM killer involved, which means the excess could already 2257 * be much bigger (and still growing) than it could for 2258 * memory.max; the dying task could get stuck in fruitless 2259 * reclaim for a long time, which isn't desirable. 2260 */ 2261 if (task_is_dying()) 2262 goto out; 2263 2264 /* 2265 * The allocating task should reclaim at least the batch size, but for 2266 * subsequent retries we only want to do what's necessary to prevent oom 2267 * or breaching resource isolation. 2268 * 2269 * This is distinct from memory.max or page allocator behaviour because 2270 * memory.high is currently batched, whereas memory.max and the page 2271 * allocator run every time an allocation is made. 2272 */ 2273 nr_reclaimed = reclaim_high(memcg, 2274 in_retry ? SWAP_CLUSTER_MAX : nr_pages, 2275 gfp_mask); 2276 2277 /* 2278 * memory.high is breached and reclaim is unable to keep up. Throttle 2279 * allocators proactively to slow down excessive growth. 2280 */ 2281 penalty_jiffies = calculate_high_delay(memcg, nr_pages, 2282 mem_find_max_overage(memcg)); 2283 2284 penalty_jiffies += calculate_high_delay(memcg, nr_pages, 2285 swap_find_max_overage(memcg)); 2286 2287 /* 2288 * Clamp the max delay per usermode return so as to still keep the 2289 * application moving forwards and also permit diagnostics, albeit 2290 * extremely slowly. 2291 */ 2292 penalty_jiffies = min(penalty_jiffies, MEMCG_MAX_HIGH_DELAY_JIFFIES); 2293 2294 /* 2295 * Don't sleep if the amount of jiffies this memcg owes us is so low 2296 * that it's not even worth doing, in an attempt to be nice to those who 2297 * go only a small amount over their memory.high value and maybe haven't 2298 * been aggressively reclaimed enough yet. 2299 */ 2300 if (penalty_jiffies <= HZ / 100) 2301 goto out; 2302 2303 /* 2304 * If reclaim is making forward progress but we're still over 2305 * memory.high, we want to encourage that rather than doing allocator 2306 * throttling. 2307 */ 2308 if (nr_reclaimed || nr_retries--) { 2309 in_retry = true; 2310 goto retry_reclaim; 2311 } 2312 2313 /* 2314 * Reclaim didn't manage to push usage below the limit, slow 2315 * this allocating task down. 2316 * 2317 * If we exit early, we're guaranteed to die (since 2318 * schedule_timeout_killable sets TASK_KILLABLE). This means we don't 2319 * need to account for any ill-begotten jiffies to pay them off later. 
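	 * The sleep below is wrapped in psi_memstall_enter()/leave(), so the
	 * throttling shows up as memory pressure stall time.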
2320 */ 2321 psi_memstall_enter(&pflags); 2322 schedule_timeout_killable(penalty_jiffies); 2323 psi_memstall_leave(&pflags); 2324 2325 out: 2326 css_put(&memcg->css); 2327 } 2328 2329 static int try_charge_memcg(struct mem_cgroup *memcg, gfp_t gfp_mask, 2330 unsigned int nr_pages) 2331 { 2332 unsigned int batch = max(MEMCG_CHARGE_BATCH, nr_pages); 2333 int nr_retries = MAX_RECLAIM_RETRIES; 2334 struct mem_cgroup *mem_over_limit; 2335 struct page_counter *counter; 2336 unsigned long nr_reclaimed; 2337 bool passed_oom = false; 2338 unsigned int reclaim_options = MEMCG_RECLAIM_MAY_SWAP; 2339 bool drained = false; 2340 bool raised_max_event = false; 2341 unsigned long pflags; 2342 bool allow_spinning = gfpflags_allow_spinning(gfp_mask); 2343 2344 retry: 2345 if (consume_stock(memcg, nr_pages)) 2346 return 0; 2347 2348 if (!allow_spinning) 2349 /* Avoid the refill and flush of the older stock */ 2350 batch = nr_pages; 2351 2352 if (!do_memsw_account() || 2353 page_counter_try_charge(&memcg->memsw, batch, &counter)) { 2354 if (page_counter_try_charge(&memcg->memory, batch, &counter)) 2355 goto done_restock; 2356 if (do_memsw_account()) 2357 page_counter_uncharge(&memcg->memsw, batch); 2358 mem_over_limit = mem_cgroup_from_counter(counter, memory); 2359 } else { 2360 mem_over_limit = mem_cgroup_from_counter(counter, memsw); 2361 reclaim_options &= ~MEMCG_RECLAIM_MAY_SWAP; 2362 } 2363 2364 if (batch > nr_pages) { 2365 batch = nr_pages; 2366 goto retry; 2367 } 2368 2369 /* 2370 * Prevent unbounded recursion when reclaim operations need to 2371 * allocate memory. This might exceed the limits temporarily, 2372 * but we prefer facilitating memory reclaim and getting back 2373 * under the limit over triggering OOM kills in these cases. 2374 */ 2375 if (unlikely(current->flags & PF_MEMALLOC)) 2376 goto force; 2377 2378 if (unlikely(task_in_memcg_oom(current))) 2379 goto nomem; 2380 2381 if (!gfpflags_allow_blocking(gfp_mask)) 2382 goto nomem; 2383 2384 __memcg_memory_event(mem_over_limit, MEMCG_MAX, allow_spinning); 2385 raised_max_event = true; 2386 2387 psi_memstall_enter(&pflags); 2388 nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages, 2389 gfp_mask, reclaim_options, NULL); 2390 psi_memstall_leave(&pflags); 2391 2392 if (mem_cgroup_margin(mem_over_limit) >= nr_pages) 2393 goto retry; 2394 2395 if (!drained) { 2396 drain_all_stock(mem_over_limit); 2397 drained = true; 2398 goto retry; 2399 } 2400 2401 if (gfp_mask & __GFP_NORETRY) 2402 goto nomem; 2403 /* 2404 * Even though the limit is exceeded at this point, reclaim 2405 * may have been able to free some pages. Retry the charge 2406 * before killing the task. 2407 * 2408 * Only for regular pages, though: huge pages are rather 2409 * unlikely to succeed so close to the limit, and we fall back 2410 * to regular pages anyway in case of failure. 2411 */ 2412 if (nr_reclaimed && nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER)) 2413 goto retry; 2414 2415 if (nr_retries--) 2416 goto retry; 2417 2418 if (gfp_mask & __GFP_RETRY_MAYFAIL) 2419 goto nomem; 2420 2421 /* Avoid endless loop for tasks bypassed by the oom killer */ 2422 if (passed_oom && task_is_dying()) 2423 goto nomem; 2424 2425 /* 2426 * keep retrying as long as the memcg oom killer is able to make 2427 * a forward progress or bypass the charge if the oom killer 2428 * couldn't make any progress. 
2429 */ 2430 if (mem_cgroup_oom(mem_over_limit, gfp_mask, 2431 get_order(nr_pages * PAGE_SIZE))) { 2432 passed_oom = true; 2433 nr_retries = MAX_RECLAIM_RETRIES; 2434 goto retry; 2435 } 2436 nomem: 2437 /* 2438 * Memcg doesn't have a dedicated reserve for atomic 2439 * allocations. But like the global atomic pool, we need to 2440 * put the burden of reclaim on regular allocation requests 2441 * and let these go through as privileged allocations. 2442 */ 2443 if (!(gfp_mask & (__GFP_NOFAIL | __GFP_HIGH))) 2444 return -ENOMEM; 2445 force: 2446 /* 2447 * If the allocation has to be enforced, don't forget to raise 2448 * a MEMCG_MAX event. 2449 */ 2450 if (!raised_max_event) 2451 __memcg_memory_event(mem_over_limit, MEMCG_MAX, allow_spinning); 2452 2453 /* 2454 * The allocation either can't fail or will lead to more memory 2455 * being freed very soon. Allow memory usage go over the limit 2456 * temporarily by force charging it. 2457 */ 2458 page_counter_charge(&memcg->memory, nr_pages); 2459 if (do_memsw_account()) 2460 page_counter_charge(&memcg->memsw, nr_pages); 2461 2462 return 0; 2463 2464 done_restock: 2465 if (batch > nr_pages) 2466 refill_stock(memcg, batch - nr_pages); 2467 2468 /* 2469 * If the hierarchy is above the normal consumption range, schedule 2470 * reclaim on returning to userland. We can perform reclaim here 2471 * if __GFP_RECLAIM but let's always punt for simplicity and so that 2472 * GFP_KERNEL can consistently be used during reclaim. @memcg is 2473 * not recorded as it most likely matches current's and won't 2474 * change in the meantime. As high limit is checked again before 2475 * reclaim, the cost of mismatch is negligible. 2476 */ 2477 do { 2478 bool mem_high, swap_high; 2479 2480 mem_high = page_counter_read(&memcg->memory) > 2481 READ_ONCE(memcg->memory.high); 2482 swap_high = page_counter_read(&memcg->swap) > 2483 READ_ONCE(memcg->swap.high); 2484 2485 /* Don't bother a random interrupted task */ 2486 if (!in_task()) { 2487 if (mem_high) { 2488 schedule_work(&memcg->high_work); 2489 break; 2490 } 2491 continue; 2492 } 2493 2494 if (mem_high || swap_high) { 2495 /* 2496 * The allocating tasks in this cgroup will need to do 2497 * reclaim or be throttled to prevent further growth 2498 * of the memory or swap footprints. 2499 * 2500 * Target some best-effort fairness between the tasks, 2501 * and distribute reclaim work and delay penalties 2502 * based on how much each task is actually allocating. 2503 */ 2504 current->memcg_nr_pages_over_high += batch; 2505 set_notify_resume(current); 2506 break; 2507 } 2508 } while ((memcg = parent_mem_cgroup(memcg))); 2509 2510 /* 2511 * Reclaim is set up above to be called from the userland 2512 * return path. But also attempt synchronous reclaim to avoid 2513 * excessive overrun while the task is still inside the 2514 * kernel. If this is successful, the return path will see it 2515 * when it rechecks the overage and simply bail out. 
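	 * The synchronous attempt below is limited to contexts that can block
	 * and are not already reclaiming (PF_MEMALLOC), and only kicks in once
	 * the accrued excess exceeds MEMCG_CHARGE_BATCH.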
2516 */ 2517 if (current->memcg_nr_pages_over_high > MEMCG_CHARGE_BATCH && 2518 !(current->flags & PF_MEMALLOC) && 2519 gfpflags_allow_blocking(gfp_mask)) 2520 __mem_cgroup_handle_over_high(gfp_mask); 2521 return 0; 2522 } 2523 2524 static inline int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask, 2525 unsigned int nr_pages) 2526 { 2527 if (mem_cgroup_is_root(memcg)) 2528 return 0; 2529 2530 return try_charge_memcg(memcg, gfp_mask, nr_pages); 2531 } 2532 2533 static void commit_charge(struct folio *folio, struct mem_cgroup *memcg) 2534 { 2535 VM_BUG_ON_FOLIO(folio_memcg_charged(folio), folio); 2536 /* 2537 * Any of the following ensures page's memcg stability: 2538 * 2539 * - the page lock 2540 * - LRU isolation 2541 * - exclusive reference 2542 */ 2543 folio->memcg_data = (unsigned long)memcg; 2544 } 2545 2546 #ifdef CONFIG_MEMCG_NMI_SAFETY_REQUIRES_ATOMIC 2547 static inline void account_slab_nmi_safe(struct mem_cgroup *memcg, 2548 struct pglist_data *pgdat, 2549 enum node_stat_item idx, int nr) 2550 { 2551 struct lruvec *lruvec; 2552 2553 if (likely(!in_nmi())) { 2554 lruvec = mem_cgroup_lruvec(memcg, pgdat); 2555 mod_memcg_lruvec_state(lruvec, idx, nr); 2556 } else { 2557 struct mem_cgroup_per_node *pn = memcg->nodeinfo[pgdat->node_id]; 2558 2559 /* preemption is disabled in_nmi(). */ 2560 css_rstat_updated(&memcg->css, smp_processor_id()); 2561 if (idx == NR_SLAB_RECLAIMABLE_B) 2562 atomic_add(nr, &pn->slab_reclaimable); 2563 else 2564 atomic_add(nr, &pn->slab_unreclaimable); 2565 } 2566 } 2567 #else 2568 static inline void account_slab_nmi_safe(struct mem_cgroup *memcg, 2569 struct pglist_data *pgdat, 2570 enum node_stat_item idx, int nr) 2571 { 2572 struct lruvec *lruvec; 2573 2574 lruvec = mem_cgroup_lruvec(memcg, pgdat); 2575 mod_memcg_lruvec_state(lruvec, idx, nr); 2576 } 2577 #endif 2578 2579 static inline void mod_objcg_mlstate(struct obj_cgroup *objcg, 2580 struct pglist_data *pgdat, 2581 enum node_stat_item idx, int nr) 2582 { 2583 struct mem_cgroup *memcg; 2584 2585 rcu_read_lock(); 2586 memcg = obj_cgroup_memcg(objcg); 2587 account_slab_nmi_safe(memcg, pgdat, idx, nr); 2588 rcu_read_unlock(); 2589 } 2590 2591 static __always_inline 2592 struct mem_cgroup *mem_cgroup_from_obj_folio(struct folio *folio, void *p) 2593 { 2594 /* 2595 * Slab objects are accounted individually, not per-page. 2596 * Memcg membership data for each individual object is saved in 2597 * slab->obj_exts. 2598 */ 2599 if (folio_test_slab(folio)) { 2600 struct slabobj_ext *obj_exts; 2601 struct slab *slab; 2602 unsigned int off; 2603 2604 slab = folio_slab(folio); 2605 obj_exts = slab_obj_exts(slab); 2606 if (!obj_exts) 2607 return NULL; 2608 2609 off = obj_to_index(slab->slab_cache, slab, p); 2610 if (obj_exts[off].objcg) 2611 return obj_cgroup_memcg(obj_exts[off].objcg); 2612 2613 return NULL; 2614 } 2615 2616 /* 2617 * folio_memcg_check() is used here, because in theory we can encounter 2618 * a folio where the slab flag has been cleared already, but 2619 * slab->obj_exts has not been freed yet 2620 * folio_memcg_check() will guarantee that a proper memory 2621 * cgroup pointer or NULL will be returned. 2622 */ 2623 return folio_memcg_check(folio); 2624 } 2625 2626 /* 2627 * Returns a pointer to the memory cgroup to which the kernel object is charged. 2628 * It is not suitable for objects allocated using vmalloc(). 2629 * 2630 * A passed kernel object must be a slab object or a generic kernel page. 2631 * 2632 * The caller must ensure the memcg lifetime, e.g. 
by taking rcu_read_lock(), cgroup_mutex, etc.
 */
struct mem_cgroup *mem_cgroup_from_slab_obj(void *p)
{
	if (mem_cgroup_disabled())
		return NULL;

	return mem_cgroup_from_obj_folio(virt_to_folio(p), p);
}

static struct obj_cgroup *__get_obj_cgroup_from_memcg(struct mem_cgroup *memcg)
{
	struct obj_cgroup *objcg = NULL;

	for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg)) {
		objcg = rcu_dereference(memcg->objcg);
		if (likely(objcg && obj_cgroup_tryget(objcg)))
			break;
		objcg = NULL;
	}
	return objcg;
}

static struct obj_cgroup *current_objcg_update(void)
{
	struct mem_cgroup *memcg;
	struct obj_cgroup *old, *objcg = NULL;

	do {
		/* Atomically drop the update bit. */
		old = xchg(&current->objcg, NULL);
		if (old) {
			old = (struct obj_cgroup *)
				((unsigned long)old & ~CURRENT_OBJCG_UPDATE_FLAG);
			obj_cgroup_put(old);

			old = NULL;
		}

		/* If new objcg is NULL, no reason for the second atomic update. */
		if (!current->mm || (current->flags & PF_KTHREAD))
			return NULL;

		/*
		 * Release the objcg pointer from the previous iteration,
		 * if try_cmpxchg() below fails.
		 */
		if (unlikely(objcg)) {
			obj_cgroup_put(objcg);
			objcg = NULL;
		}

		/*
		 * Obtain the new objcg pointer. The current task can be
		 * asynchronously moved to another memcg and the previous
		 * memcg can be offlined. So let's get the memcg pointer
		 * and try to get a reference to the objcg under a rcu read lock.
		 */

		rcu_read_lock();
		memcg = mem_cgroup_from_task(current);
		objcg = __get_obj_cgroup_from_memcg(memcg);
		rcu_read_unlock();

		/*
		 * Try to set up a new objcg pointer atomically. If it
		 * fails, it means the update flag was set concurrently, so
		 * the whole procedure should be repeated.
		 */
	} while (!try_cmpxchg(&current->objcg, &old, objcg));

	return objcg;
}

__always_inline struct obj_cgroup *current_obj_cgroup(void)
{
	struct mem_cgroup *memcg;
	struct obj_cgroup *objcg;

	if (IS_ENABLED(CONFIG_MEMCG_NMI_UNSAFE) && in_nmi())
		return NULL;

	if (in_task()) {
		memcg = current->active_memcg;
		if (unlikely(memcg))
			goto from_memcg;

		objcg = READ_ONCE(current->objcg);
		if (unlikely((unsigned long)objcg & CURRENT_OBJCG_UPDATE_FLAG))
			objcg = current_objcg_update();
		/*
		 * Objcg reference is kept by the task, so it's safe
		 * for the current task to use the objcg.
		 */
		return objcg;
	}

	memcg = this_cpu_read(int_active_memcg);
	if (unlikely(memcg))
		goto from_memcg;

	return NULL;

from_memcg:
	objcg = NULL;
	for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg)) {
		/*
		 * Memcg pointer is protected by scope (see set_active_memcg())
		 * and is pinning the corresponding objcg, so objcg can't go
		 * away and can be used within the scope without any additional
		 * protection.
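		 * The walk stops before reaching the root memcg, which has no
		 * objcg of its own, and uses the first valid objcg found.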
2744 */ 2745 objcg = rcu_dereference_check(memcg->objcg, 1); 2746 if (likely(objcg)) 2747 break; 2748 } 2749 2750 return objcg; 2751 } 2752 2753 struct obj_cgroup *get_obj_cgroup_from_folio(struct folio *folio) 2754 { 2755 struct obj_cgroup *objcg; 2756 2757 if (!memcg_kmem_online()) 2758 return NULL; 2759 2760 if (folio_memcg_kmem(folio)) { 2761 objcg = __folio_objcg(folio); 2762 obj_cgroup_get(objcg); 2763 } else { 2764 struct mem_cgroup *memcg; 2765 2766 rcu_read_lock(); 2767 memcg = __folio_memcg(folio); 2768 if (memcg) 2769 objcg = __get_obj_cgroup_from_memcg(memcg); 2770 else 2771 objcg = NULL; 2772 rcu_read_unlock(); 2773 } 2774 return objcg; 2775 } 2776 2777 #ifdef CONFIG_MEMCG_NMI_SAFETY_REQUIRES_ATOMIC 2778 static inline void account_kmem_nmi_safe(struct mem_cgroup *memcg, int val) 2779 { 2780 if (likely(!in_nmi())) { 2781 mod_memcg_state(memcg, MEMCG_KMEM, val); 2782 } else { 2783 /* preemption is disabled in_nmi(). */ 2784 css_rstat_updated(&memcg->css, smp_processor_id()); 2785 atomic_add(val, &memcg->kmem_stat); 2786 } 2787 } 2788 #else 2789 static inline void account_kmem_nmi_safe(struct mem_cgroup *memcg, int val) 2790 { 2791 mod_memcg_state(memcg, MEMCG_KMEM, val); 2792 } 2793 #endif 2794 2795 /* 2796 * obj_cgroup_uncharge_pages: uncharge a number of kernel pages from a objcg 2797 * @objcg: object cgroup to uncharge 2798 * @nr_pages: number of pages to uncharge 2799 */ 2800 static void obj_cgroup_uncharge_pages(struct obj_cgroup *objcg, 2801 unsigned int nr_pages) 2802 { 2803 struct mem_cgroup *memcg; 2804 2805 memcg = get_mem_cgroup_from_objcg(objcg); 2806 2807 account_kmem_nmi_safe(memcg, -nr_pages); 2808 memcg1_account_kmem(memcg, -nr_pages); 2809 if (!mem_cgroup_is_root(memcg)) 2810 refill_stock(memcg, nr_pages); 2811 2812 css_put(&memcg->css); 2813 } 2814 2815 /* 2816 * obj_cgroup_charge_pages: charge a number of kernel pages to a objcg 2817 * @objcg: object cgroup to charge 2818 * @gfp: reclaim mode 2819 * @nr_pages: number of pages to charge 2820 * 2821 * Returns 0 on success, an error code on failure. 2822 */ 2823 static int obj_cgroup_charge_pages(struct obj_cgroup *objcg, gfp_t gfp, 2824 unsigned int nr_pages) 2825 { 2826 struct mem_cgroup *memcg; 2827 int ret; 2828 2829 memcg = get_mem_cgroup_from_objcg(objcg); 2830 2831 ret = try_charge_memcg(memcg, gfp, nr_pages); 2832 if (ret) 2833 goto out; 2834 2835 account_kmem_nmi_safe(memcg, nr_pages); 2836 memcg1_account_kmem(memcg, nr_pages); 2837 out: 2838 css_put(&memcg->css); 2839 2840 return ret; 2841 } 2842 2843 static struct obj_cgroup *page_objcg(const struct page *page) 2844 { 2845 unsigned long memcg_data = page->memcg_data; 2846 2847 if (mem_cgroup_disabled() || !memcg_data) 2848 return NULL; 2849 2850 VM_BUG_ON_PAGE((memcg_data & OBJEXTS_FLAGS_MASK) != MEMCG_DATA_KMEM, 2851 page); 2852 return (struct obj_cgroup *)(memcg_data - MEMCG_DATA_KMEM); 2853 } 2854 2855 static void page_set_objcg(struct page *page, const struct obj_cgroup *objcg) 2856 { 2857 page->memcg_data = (unsigned long)objcg | MEMCG_DATA_KMEM; 2858 } 2859 2860 /** 2861 * __memcg_kmem_charge_page: charge a kmem page to the current memory cgroup 2862 * @page: page to charge 2863 * @gfp: reclaim mode 2864 * @order: allocation order 2865 * 2866 * Returns 0 on success, an error code on failure. 
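 * On success an objcg reference is taken and stored in page->memcg_data
 * (tagged MEMCG_DATA_KMEM); it is dropped again in __memcg_kmem_uncharge_page().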
2867 */ 2868 int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order) 2869 { 2870 struct obj_cgroup *objcg; 2871 int ret = 0; 2872 2873 objcg = current_obj_cgroup(); 2874 if (objcg) { 2875 ret = obj_cgroup_charge_pages(objcg, gfp, 1 << order); 2876 if (!ret) { 2877 obj_cgroup_get(objcg); 2878 page_set_objcg(page, objcg); 2879 return 0; 2880 } 2881 } 2882 return ret; 2883 } 2884 2885 /** 2886 * __memcg_kmem_uncharge_page: uncharge a kmem page 2887 * @page: page to uncharge 2888 * @order: allocation order 2889 */ 2890 void __memcg_kmem_uncharge_page(struct page *page, int order) 2891 { 2892 struct obj_cgroup *objcg = page_objcg(page); 2893 unsigned int nr_pages = 1 << order; 2894 2895 if (!objcg) 2896 return; 2897 2898 obj_cgroup_uncharge_pages(objcg, nr_pages); 2899 page->memcg_data = 0; 2900 obj_cgroup_put(objcg); 2901 } 2902 2903 static void __account_obj_stock(struct obj_cgroup *objcg, 2904 struct obj_stock_pcp *stock, int nr, 2905 struct pglist_data *pgdat, enum node_stat_item idx) 2906 { 2907 int *bytes; 2908 2909 /* 2910 * Save vmstat data in stock and skip vmstat array update unless 2911 * accumulating over a page of vmstat data or when pgdat changes. 2912 */ 2913 if (stock->cached_pgdat != pgdat) { 2914 /* Flush the existing cached vmstat data */ 2915 struct pglist_data *oldpg = stock->cached_pgdat; 2916 2917 if (stock->nr_slab_reclaimable_b) { 2918 mod_objcg_mlstate(objcg, oldpg, NR_SLAB_RECLAIMABLE_B, 2919 stock->nr_slab_reclaimable_b); 2920 stock->nr_slab_reclaimable_b = 0; 2921 } 2922 if (stock->nr_slab_unreclaimable_b) { 2923 mod_objcg_mlstate(objcg, oldpg, NR_SLAB_UNRECLAIMABLE_B, 2924 stock->nr_slab_unreclaimable_b); 2925 stock->nr_slab_unreclaimable_b = 0; 2926 } 2927 stock->cached_pgdat = pgdat; 2928 } 2929 2930 bytes = (idx == NR_SLAB_RECLAIMABLE_B) ? &stock->nr_slab_reclaimable_b 2931 : &stock->nr_slab_unreclaimable_b; 2932 /* 2933 * Even for large object >= PAGE_SIZE, the vmstat data will still be 2934 * cached locally at least once before pushing it out. 
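	 * The accumulated value is only pushed out via mod_objcg_mlstate()
	 * once its magnitude exceeds PAGE_SIZE or the pgdat changes (above).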
2935 */ 2936 if (!*bytes) { 2937 *bytes = nr; 2938 nr = 0; 2939 } else { 2940 *bytes += nr; 2941 if (abs(*bytes) > PAGE_SIZE) { 2942 nr = *bytes; 2943 *bytes = 0; 2944 } else { 2945 nr = 0; 2946 } 2947 } 2948 if (nr) 2949 mod_objcg_mlstate(objcg, pgdat, idx, nr); 2950 } 2951 2952 static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes, 2953 struct pglist_data *pgdat, enum node_stat_item idx) 2954 { 2955 struct obj_stock_pcp *stock; 2956 bool ret = false; 2957 2958 if (!local_trylock(&obj_stock.lock)) 2959 return ret; 2960 2961 stock = this_cpu_ptr(&obj_stock); 2962 if (objcg == READ_ONCE(stock->cached_objcg) && stock->nr_bytes >= nr_bytes) { 2963 stock->nr_bytes -= nr_bytes; 2964 ret = true; 2965 2966 if (pgdat) 2967 __account_obj_stock(objcg, stock, nr_bytes, pgdat, idx); 2968 } 2969 2970 local_unlock(&obj_stock.lock); 2971 2972 return ret; 2973 } 2974 2975 static void drain_obj_stock(struct obj_stock_pcp *stock) 2976 { 2977 struct obj_cgroup *old = READ_ONCE(stock->cached_objcg); 2978 2979 if (!old) 2980 return; 2981 2982 if (stock->nr_bytes) { 2983 unsigned int nr_pages = stock->nr_bytes >> PAGE_SHIFT; 2984 unsigned int nr_bytes = stock->nr_bytes & (PAGE_SIZE - 1); 2985 2986 if (nr_pages) { 2987 struct mem_cgroup *memcg; 2988 2989 memcg = get_mem_cgroup_from_objcg(old); 2990 2991 mod_memcg_state(memcg, MEMCG_KMEM, -nr_pages); 2992 memcg1_account_kmem(memcg, -nr_pages); 2993 if (!mem_cgroup_is_root(memcg)) 2994 memcg_uncharge(memcg, nr_pages); 2995 2996 css_put(&memcg->css); 2997 } 2998 2999 /* 3000 * The leftover is flushed to the centralized per-memcg value. 3001 * On the next attempt to refill obj stock it will be moved 3002 * to a per-cpu stock (probably, on an other CPU), see 3003 * refill_obj_stock(). 3004 * 3005 * How often it's flushed is a trade-off between the memory 3006 * limit enforcement accuracy and potential CPU contention, 3007 * so it might be changed in the future. 
3008 */ 3009 atomic_add(nr_bytes, &old->nr_charged_bytes); 3010 stock->nr_bytes = 0; 3011 } 3012 3013 /* 3014 * Flush the vmstat data in current stock 3015 */ 3016 if (stock->nr_slab_reclaimable_b || stock->nr_slab_unreclaimable_b) { 3017 if (stock->nr_slab_reclaimable_b) { 3018 mod_objcg_mlstate(old, stock->cached_pgdat, 3019 NR_SLAB_RECLAIMABLE_B, 3020 stock->nr_slab_reclaimable_b); 3021 stock->nr_slab_reclaimable_b = 0; 3022 } 3023 if (stock->nr_slab_unreclaimable_b) { 3024 mod_objcg_mlstate(old, stock->cached_pgdat, 3025 NR_SLAB_UNRECLAIMABLE_B, 3026 stock->nr_slab_unreclaimable_b); 3027 stock->nr_slab_unreclaimable_b = 0; 3028 } 3029 stock->cached_pgdat = NULL; 3030 } 3031 3032 WRITE_ONCE(stock->cached_objcg, NULL); 3033 obj_cgroup_put(old); 3034 } 3035 3036 static bool obj_stock_flush_required(struct obj_stock_pcp *stock, 3037 struct mem_cgroup *root_memcg) 3038 { 3039 struct obj_cgroup *objcg = READ_ONCE(stock->cached_objcg); 3040 struct mem_cgroup *memcg; 3041 bool flush = false; 3042 3043 rcu_read_lock(); 3044 if (objcg) { 3045 memcg = obj_cgroup_memcg(objcg); 3046 if (memcg && mem_cgroup_is_descendant(memcg, root_memcg)) 3047 flush = true; 3048 } 3049 rcu_read_unlock(); 3050 3051 return flush; 3052 } 3053 3054 static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes, 3055 bool allow_uncharge, int nr_acct, struct pglist_data *pgdat, 3056 enum node_stat_item idx) 3057 { 3058 struct obj_stock_pcp *stock; 3059 unsigned int nr_pages = 0; 3060 3061 if (!local_trylock(&obj_stock.lock)) { 3062 if (pgdat) 3063 mod_objcg_mlstate(objcg, pgdat, idx, nr_bytes); 3064 nr_pages = nr_bytes >> PAGE_SHIFT; 3065 nr_bytes = nr_bytes & (PAGE_SIZE - 1); 3066 atomic_add(nr_bytes, &objcg->nr_charged_bytes); 3067 goto out; 3068 } 3069 3070 stock = this_cpu_ptr(&obj_stock); 3071 if (READ_ONCE(stock->cached_objcg) != objcg) { /* reset if necessary */ 3072 drain_obj_stock(stock); 3073 obj_cgroup_get(objcg); 3074 stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes) 3075 ? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0; 3076 WRITE_ONCE(stock->cached_objcg, objcg); 3077 3078 allow_uncharge = true; /* Allow uncharge when objcg changes */ 3079 } 3080 stock->nr_bytes += nr_bytes; 3081 3082 if (pgdat) 3083 __account_obj_stock(objcg, stock, nr_acct, pgdat, idx); 3084 3085 if (allow_uncharge && (stock->nr_bytes > PAGE_SIZE)) { 3086 nr_pages = stock->nr_bytes >> PAGE_SHIFT; 3087 stock->nr_bytes &= (PAGE_SIZE - 1); 3088 } 3089 3090 local_unlock(&obj_stock.lock); 3091 out: 3092 if (nr_pages) 3093 obj_cgroup_uncharge_pages(objcg, nr_pages); 3094 } 3095 3096 static int obj_cgroup_charge_account(struct obj_cgroup *objcg, gfp_t gfp, size_t size, 3097 struct pglist_data *pgdat, enum node_stat_item idx) 3098 { 3099 unsigned int nr_pages, nr_bytes; 3100 int ret; 3101 3102 if (likely(consume_obj_stock(objcg, size, pgdat, idx))) 3103 return 0; 3104 3105 /* 3106 * In theory, objcg->nr_charged_bytes can have enough 3107 * pre-charged bytes to satisfy the allocation. However, 3108 * flushing objcg->nr_charged_bytes requires two atomic 3109 * operations, and objcg->nr_charged_bytes can't be big. 3110 * The shared objcg->nr_charged_bytes can also become a 3111 * performance bottleneck if all tasks of the same memcg are 3112 * trying to update it. So it's better to ignore it and try 3113 * grab some new pages. The stock's nr_bytes will be flushed to 3114 * objcg->nr_charged_bytes later on when objcg changes. 
3115 * 3116 * The stock's nr_bytes may contain enough pre-charged bytes 3117 * to allow one less page from being charged, but we can't rely 3118 * on the pre-charged bytes not being changed outside of 3119 * consume_obj_stock() or refill_obj_stock(). So ignore those 3120 * pre-charged bytes as well when charging pages. To avoid a 3121 * page uncharge right after a page charge, we set the 3122 * allow_uncharge flag to false when calling refill_obj_stock() 3123 * to temporarily allow the pre-charged bytes to exceed the page 3124 * size limit. The maximum reachable value of the pre-charged 3125 * bytes is (sizeof(object) + PAGE_SIZE - 2) if there is no data 3126 * race. 3127 */ 3128 nr_pages = size >> PAGE_SHIFT; 3129 nr_bytes = size & (PAGE_SIZE - 1); 3130 3131 if (nr_bytes) 3132 nr_pages += 1; 3133 3134 ret = obj_cgroup_charge_pages(objcg, gfp, nr_pages); 3135 if (!ret && (nr_bytes || pgdat)) 3136 refill_obj_stock(objcg, nr_bytes ? PAGE_SIZE - nr_bytes : 0, 3137 false, size, pgdat, idx); 3138 3139 return ret; 3140 } 3141 3142 int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size) 3143 { 3144 return obj_cgroup_charge_account(objcg, gfp, size, NULL, 0); 3145 } 3146 3147 void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size) 3148 { 3149 refill_obj_stock(objcg, size, true, 0, NULL, 0); 3150 } 3151 3152 static inline size_t obj_full_size(struct kmem_cache *s) 3153 { 3154 /* 3155 * For each accounted object there is an extra space which is used 3156 * to store obj_cgroup membership. Charge it too. 3157 */ 3158 return s->size + sizeof(struct obj_cgroup *); 3159 } 3160 3161 bool __memcg_slab_post_alloc_hook(struct kmem_cache *s, struct list_lru *lru, 3162 gfp_t flags, size_t size, void **p) 3163 { 3164 struct obj_cgroup *objcg; 3165 struct slab *slab; 3166 unsigned long off; 3167 size_t i; 3168 3169 /* 3170 * The obtained objcg pointer is safe to use within the current scope, 3171 * defined by current task or set_active_memcg() pair. 3172 * obj_cgroup_get() is used to get a permanent reference. 3173 */ 3174 objcg = current_obj_cgroup(); 3175 if (!objcg) 3176 return true; 3177 3178 /* 3179 * slab_alloc_node() avoids the NULL check, so we might be called with a 3180 * single NULL object. kmem_cache_alloc_bulk() aborts if it can't fill 3181 * the whole requested size. 
3182 * return success as there's nothing to free back 3183 */ 3184 if (unlikely(*p == NULL)) 3185 return true; 3186 3187 flags &= gfp_allowed_mask; 3188 3189 if (lru) { 3190 int ret; 3191 struct mem_cgroup *memcg; 3192 3193 memcg = get_mem_cgroup_from_objcg(objcg); 3194 ret = memcg_list_lru_alloc(memcg, lru, flags); 3195 css_put(&memcg->css); 3196 3197 if (ret) 3198 return false; 3199 } 3200 3201 for (i = 0; i < size; i++) { 3202 slab = virt_to_slab(p[i]); 3203 3204 if (!slab_obj_exts(slab) && 3205 alloc_slab_obj_exts(slab, s, flags, false)) { 3206 continue; 3207 } 3208 3209 /* 3210 * if we fail and size is 1, memcg_alloc_abort_single() will 3211 * just free the object, which is ok as we have not assigned 3212 * objcg to its obj_ext yet 3213 * 3214 * for larger sizes, kmem_cache_free_bulk() will uncharge 3215 * any objects that were already charged and obj_ext assigned 3216 * 3217 * TODO: we could batch this until slab_pgdat(slab) changes 3218 * between iterations, with a more complicated undo 3219 */ 3220 if (obj_cgroup_charge_account(objcg, flags, obj_full_size(s), 3221 slab_pgdat(slab), cache_vmstat_idx(s))) 3222 return false; 3223 3224 off = obj_to_index(s, slab, p[i]); 3225 obj_cgroup_get(objcg); 3226 slab_obj_exts(slab)[off].objcg = objcg; 3227 } 3228 3229 return true; 3230 } 3231 3232 void __memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab, 3233 void **p, int objects, struct slabobj_ext *obj_exts) 3234 { 3235 size_t obj_size = obj_full_size(s); 3236 3237 for (int i = 0; i < objects; i++) { 3238 struct obj_cgroup *objcg; 3239 unsigned int off; 3240 3241 off = obj_to_index(s, slab, p[i]); 3242 objcg = obj_exts[off].objcg; 3243 if (!objcg) 3244 continue; 3245 3246 obj_exts[off].objcg = NULL; 3247 refill_obj_stock(objcg, obj_size, true, -obj_size, 3248 slab_pgdat(slab), cache_vmstat_idx(s)); 3249 obj_cgroup_put(objcg); 3250 } 3251 } 3252 3253 /* 3254 * The objcg is only set on the first page, so transfer it to all the 3255 * other pages. 3256 */ 3257 void split_page_memcg(struct page *page, unsigned order) 3258 { 3259 struct obj_cgroup *objcg = page_objcg(page); 3260 unsigned int i, nr = 1 << order; 3261 3262 if (!objcg) 3263 return; 3264 3265 for (i = 1; i < nr; i++) 3266 page_set_objcg(&page[i], objcg); 3267 3268 obj_cgroup_get_many(objcg, nr - 1); 3269 } 3270 3271 void folio_split_memcg_refs(struct folio *folio, unsigned old_order, 3272 unsigned new_order) 3273 { 3274 unsigned new_refs; 3275 3276 if (mem_cgroup_disabled() || !folio_memcg_charged(folio)) 3277 return; 3278 3279 new_refs = (1 << (old_order - new_order)) - 1; 3280 css_get_many(&__folio_memcg(folio)->css, new_refs); 3281 } 3282 3283 unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap) 3284 { 3285 unsigned long val; 3286 3287 if (mem_cgroup_is_root(memcg)) { 3288 /* 3289 * Approximate root's usage from global state. This isn't 3290 * perfect, but the root usage was always an approximation. 
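		 * Only file and anon pages (plus swap, if requested) are
		 * counted here; kernel memory charged to the root is not
		 * included in this estimate.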
3291 */ 3292 val = global_node_page_state(NR_FILE_PAGES) + 3293 global_node_page_state(NR_ANON_MAPPED); 3294 if (swap) 3295 val += total_swap_pages - get_nr_swap_pages(); 3296 } else { 3297 if (!swap) 3298 val = page_counter_read(&memcg->memory); 3299 else 3300 val = page_counter_read(&memcg->memsw); 3301 } 3302 return val; 3303 } 3304 3305 static int memcg_online_kmem(struct mem_cgroup *memcg) 3306 { 3307 struct obj_cgroup *objcg; 3308 3309 if (mem_cgroup_kmem_disabled()) 3310 return 0; 3311 3312 if (unlikely(mem_cgroup_is_root(memcg))) 3313 return 0; 3314 3315 objcg = obj_cgroup_alloc(); 3316 if (!objcg) 3317 return -ENOMEM; 3318 3319 objcg->memcg = memcg; 3320 rcu_assign_pointer(memcg->objcg, objcg); 3321 obj_cgroup_get(objcg); 3322 memcg->orig_objcg = objcg; 3323 3324 static_branch_enable(&memcg_kmem_online_key); 3325 3326 memcg->kmemcg_id = memcg->id.id; 3327 3328 return 0; 3329 } 3330 3331 static void memcg_offline_kmem(struct mem_cgroup *memcg) 3332 { 3333 struct mem_cgroup *parent; 3334 3335 if (mem_cgroup_kmem_disabled()) 3336 return; 3337 3338 if (unlikely(mem_cgroup_is_root(memcg))) 3339 return; 3340 3341 parent = parent_mem_cgroup(memcg); 3342 if (!parent) 3343 parent = root_mem_cgroup; 3344 3345 memcg_reparent_list_lrus(memcg, parent); 3346 3347 /* 3348 * Objcg's reparenting must be after list_lru's, make sure list_lru 3349 * helpers won't use parent's list_lru until child is drained. 3350 */ 3351 memcg_reparent_objcgs(memcg, parent); 3352 } 3353 3354 #ifdef CONFIG_CGROUP_WRITEBACK 3355 3356 #include <trace/events/writeback.h> 3357 3358 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp) 3359 { 3360 return wb_domain_init(&memcg->cgwb_domain, gfp); 3361 } 3362 3363 static void memcg_wb_domain_exit(struct mem_cgroup *memcg) 3364 { 3365 wb_domain_exit(&memcg->cgwb_domain); 3366 } 3367 3368 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg) 3369 { 3370 wb_domain_size_changed(&memcg->cgwb_domain); 3371 } 3372 3373 struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb) 3374 { 3375 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css); 3376 3377 if (!memcg->css.parent) 3378 return NULL; 3379 3380 return &memcg->cgwb_domain; 3381 } 3382 3383 /** 3384 * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg 3385 * @wb: bdi_writeback in question 3386 * @pfilepages: out parameter for number of file pages 3387 * @pheadroom: out parameter for number of allocatable pages according to memcg 3388 * @pdirty: out parameter for number of dirty pages 3389 * @pwriteback: out parameter for number of pages under writeback 3390 * 3391 * Determine the numbers of file, headroom, dirty, and writeback pages in 3392 * @wb's memcg. File, dirty and writeback are self-explanatory. Headroom 3393 * is a bit more involved. 3394 * 3395 * A memcg's headroom is "min(max, high) - used". In the hierarchy, the 3396 * headroom is calculated as the lowest headroom of itself and the 3397 * ancestors. Note that this doesn't consider the actual amount of 3398 * available memory in the system. The caller should further cap 3399 * *@pheadroom accordingly. 
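 * A level contributes zero headroom once its usage reaches its own
 * min(max, high) ceiling.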
3400 */ 3401 void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages, 3402 unsigned long *pheadroom, unsigned long *pdirty, 3403 unsigned long *pwriteback) 3404 { 3405 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css); 3406 struct mem_cgroup *parent; 3407 3408 mem_cgroup_flush_stats_ratelimited(memcg); 3409 3410 *pdirty = memcg_page_state(memcg, NR_FILE_DIRTY); 3411 *pwriteback = memcg_page_state(memcg, NR_WRITEBACK); 3412 *pfilepages = memcg_page_state(memcg, NR_INACTIVE_FILE) + 3413 memcg_page_state(memcg, NR_ACTIVE_FILE); 3414 3415 *pheadroom = PAGE_COUNTER_MAX; 3416 while ((parent = parent_mem_cgroup(memcg))) { 3417 unsigned long ceiling = min(READ_ONCE(memcg->memory.max), 3418 READ_ONCE(memcg->memory.high)); 3419 unsigned long used = page_counter_read(&memcg->memory); 3420 3421 *pheadroom = min(*pheadroom, ceiling - min(ceiling, used)); 3422 memcg = parent; 3423 } 3424 } 3425 3426 /* 3427 * Foreign dirty flushing 3428 * 3429 * There's an inherent mismatch between memcg and writeback. The former 3430 * tracks ownership per-page while the latter per-inode. This was a 3431 * deliberate design decision because honoring per-page ownership in the 3432 * writeback path is complicated, may lead to higher CPU and IO overheads 3433 * and deemed unnecessary given that write-sharing an inode across 3434 * different cgroups isn't a common use-case. 3435 * 3436 * Combined with inode majority-writer ownership switching, this works well 3437 * enough in most cases but there are some pathological cases. For 3438 * example, let's say there are two cgroups A and B which keep writing to 3439 * different but confined parts of the same inode. B owns the inode and 3440 * A's memory is limited far below B's. A's dirty ratio can rise enough to 3441 * trigger balance_dirty_pages() sleeps but B's can be low enough to avoid 3442 * triggering background writeback. A will be slowed down without a way to 3443 * make writeback of the dirty pages happen. 3444 * 3445 * Conditions like the above can lead to a cgroup getting repeatedly and 3446 * severely throttled after making some progress after each 3447 * dirty_expire_interval while the underlying IO device is almost 3448 * completely idle. 3449 * 3450 * Solving this problem completely requires matching the ownership tracking 3451 * granularities between memcg and writeback in either direction. However, 3452 * the more egregious behaviors can be avoided by simply remembering the 3453 * most recent foreign dirtying events and initiating remote flushes on 3454 * them when local writeback isn't enough to keep the memory clean enough. 3455 * 3456 * The following two functions implement such mechanism. When a foreign 3457 * page - a page whose memcg and writeback ownerships don't match - is 3458 * dirtied, mem_cgroup_track_foreign_dirty() records the inode owning 3459 * bdi_writeback on the page owning memcg. When balance_dirty_pages() 3460 * decides that the memcg needs to sleep due to high dirty ratio, it calls 3461 * mem_cgroup_flush_foreign() which queues writeback on the recorded 3462 * foreign bdi_writebacks which haven't expired. Both the numbers of 3463 * recorded bdi_writebacks and concurrent in-flight foreign writebacks are 3464 * limited to MEMCG_CGWB_FRN_CNT. 3465 * 3466 * The mechanism only remembers IDs and doesn't hold any object references. 3467 * As being wrong occasionally doesn't matter, updates and accesses to the 3468 * records are lockless and racy. 
3469 */ 3470 void mem_cgroup_track_foreign_dirty_slowpath(struct folio *folio, 3471 struct bdi_writeback *wb) 3472 { 3473 struct mem_cgroup *memcg = folio_memcg(folio); 3474 struct memcg_cgwb_frn *frn; 3475 u64 now = get_jiffies_64(); 3476 u64 oldest_at = now; 3477 int oldest = -1; 3478 int i; 3479 3480 trace_track_foreign_dirty(folio, wb); 3481 3482 /* 3483 * Pick the slot to use. If there is already a slot for @wb, keep 3484 * using it. If not replace the oldest one which isn't being 3485 * written out. 3486 */ 3487 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) { 3488 frn = &memcg->cgwb_frn[i]; 3489 if (frn->bdi_id == wb->bdi->id && 3490 frn->memcg_id == wb->memcg_css->id) 3491 break; 3492 if (time_before64(frn->at, oldest_at) && 3493 atomic_read(&frn->done.cnt) == 1) { 3494 oldest = i; 3495 oldest_at = frn->at; 3496 } 3497 } 3498 3499 if (i < MEMCG_CGWB_FRN_CNT) { 3500 /* 3501 * Re-using an existing one. Update timestamp lazily to 3502 * avoid making the cacheline hot. We want them to be 3503 * reasonably up-to-date and significantly shorter than 3504 * dirty_expire_interval as that's what expires the record. 3505 * Use the shorter of 1s and dirty_expire_interval / 8. 3506 */ 3507 unsigned long update_intv = 3508 min_t(unsigned long, HZ, 3509 msecs_to_jiffies(dirty_expire_interval * 10) / 8); 3510 3511 if (time_before64(frn->at, now - update_intv)) 3512 frn->at = now; 3513 } else if (oldest >= 0) { 3514 /* replace the oldest free one */ 3515 frn = &memcg->cgwb_frn[oldest]; 3516 frn->bdi_id = wb->bdi->id; 3517 frn->memcg_id = wb->memcg_css->id; 3518 frn->at = now; 3519 } 3520 } 3521 3522 /* issue foreign writeback flushes for recorded foreign dirtying events */ 3523 void mem_cgroup_flush_foreign(struct bdi_writeback *wb) 3524 { 3525 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css); 3526 unsigned long intv = msecs_to_jiffies(dirty_expire_interval * 10); 3527 u64 now = jiffies_64; 3528 int i; 3529 3530 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) { 3531 struct memcg_cgwb_frn *frn = &memcg->cgwb_frn[i]; 3532 3533 /* 3534 * If the record is older than dirty_expire_interval, 3535 * writeback on it has already started. No need to kick it 3536 * off again. Also, don't start a new one if there's 3537 * already one in flight. 3538 */ 3539 if (time_after64(frn->at, now - intv) && 3540 atomic_read(&frn->done.cnt) == 1) { 3541 frn->at = 0; 3542 trace_flush_foreign(wb, frn->bdi_id, frn->memcg_id); 3543 cgroup_writeback_by_id(frn->bdi_id, frn->memcg_id, 3544 WB_REASON_FOREIGN_FLUSH, 3545 &frn->done); 3546 } 3547 } 3548 } 3549 3550 #else /* CONFIG_CGROUP_WRITEBACK */ 3551 3552 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp) 3553 { 3554 return 0; 3555 } 3556 3557 static void memcg_wb_domain_exit(struct mem_cgroup *memcg) 3558 { 3559 } 3560 3561 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg) 3562 { 3563 } 3564 3565 #endif /* CONFIG_CGROUP_WRITEBACK */ 3566 3567 /* 3568 * Private memory cgroup IDR 3569 * 3570 * Swap-out records and page cache shadow entries need to store memcg 3571 * references in constrained space, so we maintain an ID space that is 3572 * limited to 16 bit (MEM_CGROUP_ID_MAX), limiting the total number of 3573 * memory-controlled cgroups to 64k. 3574 * 3575 * However, there usually are many references to the offline CSS after 3576 * the cgroup has been destroyed, such as page cache or reclaimable 3577 * slab objects, that don't need to hang on to the ID. 
We want to keep
 * those dead CSS from occupying IDs, or we might quickly exhaust the
 * relatively small ID space and prevent the creation of new cgroups
 * even when there are much fewer than 64k cgroups - possibly none.
 *
 * Maintain a private 16-bit ID space for memcg, and allow the ID to
 * be freed and recycled when it's no longer needed, which is usually
 * when the CSS is offlined.
 *
 * The only exception to that are records of swapped out tmpfs/shmem
 * pages that need to be attributed to live ancestors on swapin. But
 * those references are manageable from userspace.
 */

#define MEM_CGROUP_ID_MAX	((1UL << MEM_CGROUP_ID_SHIFT) - 1)
static DEFINE_XARRAY_ALLOC1(mem_cgroup_ids);

static void mem_cgroup_id_remove(struct mem_cgroup *memcg)
{
	if (memcg->id.id > 0) {
		xa_erase(&mem_cgroup_ids, memcg->id.id);
		memcg->id.id = 0;
	}
}

void __maybe_unused mem_cgroup_id_get_many(struct mem_cgroup *memcg,
					   unsigned int n)
{
	refcount_add(n, &memcg->id.ref);
}

static void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n)
{
	if (refcount_sub_and_test(n, &memcg->id.ref)) {
		mem_cgroup_id_remove(memcg);

		/* Memcg ID pins CSS */
		css_put(&memcg->css);
	}
}

static inline void mem_cgroup_id_put(struct mem_cgroup *memcg)
{
	mem_cgroup_id_put_many(memcg, 1);
}

struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg)
{
	while (!refcount_inc_not_zero(&memcg->id.ref)) {
		/*
		 * The root cgroup cannot be destroyed, so its refcount must
		 * always be >= 1.
		 */
		if (WARN_ON_ONCE(mem_cgroup_is_root(memcg))) {
			VM_BUG_ON(1);
			break;
		}
		memcg = parent_mem_cgroup(memcg);
		if (!memcg)
			memcg = root_mem_cgroup;
	}
	return memcg;
}

/**
 * mem_cgroup_from_id - look up a memcg from a memcg id
 * @id: the memcg id to look up
 *
 * Caller must hold rcu_read_lock().
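 * Returns NULL if no memcg is currently published under @id (see
 * mem_cgroup_css_online()).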
3646 */ 3647 struct mem_cgroup *mem_cgroup_from_id(unsigned short id) 3648 { 3649 WARN_ON_ONCE(!rcu_read_lock_held()); 3650 return xa_load(&mem_cgroup_ids, id); 3651 } 3652 3653 #ifdef CONFIG_SHRINKER_DEBUG 3654 struct mem_cgroup *mem_cgroup_get_from_ino(unsigned long ino) 3655 { 3656 struct cgroup *cgrp; 3657 struct cgroup_subsys_state *css; 3658 struct mem_cgroup *memcg; 3659 3660 cgrp = cgroup_get_from_id(ino); 3661 if (IS_ERR(cgrp)) 3662 return ERR_CAST(cgrp); 3663 3664 css = cgroup_get_e_css(cgrp, &memory_cgrp_subsys); 3665 if (css) 3666 memcg = container_of(css, struct mem_cgroup, css); 3667 else 3668 memcg = ERR_PTR(-ENOENT); 3669 3670 cgroup_put(cgrp); 3671 3672 return memcg; 3673 } 3674 #endif 3675 3676 static void free_mem_cgroup_per_node_info(struct mem_cgroup_per_node *pn) 3677 { 3678 if (!pn) 3679 return; 3680 3681 free_percpu(pn->lruvec_stats_percpu); 3682 kfree(pn->lruvec_stats); 3683 kfree(pn); 3684 } 3685 3686 static bool alloc_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node) 3687 { 3688 struct mem_cgroup_per_node *pn; 3689 3690 pn = kmem_cache_alloc_node(memcg_pn_cachep, GFP_KERNEL | __GFP_ZERO, 3691 node); 3692 if (!pn) 3693 return false; 3694 3695 pn->lruvec_stats = kzalloc_node(sizeof(struct lruvec_stats), 3696 GFP_KERNEL_ACCOUNT, node); 3697 if (!pn->lruvec_stats) 3698 goto fail; 3699 3700 pn->lruvec_stats_percpu = alloc_percpu_gfp(struct lruvec_stats_percpu, 3701 GFP_KERNEL_ACCOUNT); 3702 if (!pn->lruvec_stats_percpu) 3703 goto fail; 3704 3705 lruvec_init(&pn->lruvec); 3706 pn->memcg = memcg; 3707 3708 memcg->nodeinfo[node] = pn; 3709 return true; 3710 fail: 3711 free_mem_cgroup_per_node_info(pn); 3712 return false; 3713 } 3714 3715 static void __mem_cgroup_free(struct mem_cgroup *memcg) 3716 { 3717 int node; 3718 3719 obj_cgroup_put(memcg->orig_objcg); 3720 3721 for_each_node(node) 3722 free_mem_cgroup_per_node_info(memcg->nodeinfo[node]); 3723 memcg1_free_events(memcg); 3724 kfree(memcg->vmstats); 3725 free_percpu(memcg->vmstats_percpu); 3726 kfree(memcg); 3727 } 3728 3729 static void mem_cgroup_free(struct mem_cgroup *memcg) 3730 { 3731 lru_gen_exit_memcg(memcg); 3732 memcg_wb_domain_exit(memcg); 3733 __mem_cgroup_free(memcg); 3734 } 3735 3736 static struct mem_cgroup *mem_cgroup_alloc(struct mem_cgroup *parent) 3737 { 3738 struct memcg_vmstats_percpu *statc; 3739 struct memcg_vmstats_percpu __percpu *pstatc_pcpu; 3740 struct mem_cgroup *memcg; 3741 int node, cpu; 3742 int __maybe_unused i; 3743 long error; 3744 3745 memcg = kmem_cache_zalloc(memcg_cachep, GFP_KERNEL); 3746 if (!memcg) 3747 return ERR_PTR(-ENOMEM); 3748 3749 error = xa_alloc(&mem_cgroup_ids, &memcg->id.id, NULL, 3750 XA_LIMIT(1, MEM_CGROUP_ID_MAX), GFP_KERNEL); 3751 if (error) 3752 goto fail; 3753 error = -ENOMEM; 3754 3755 memcg->vmstats = kzalloc(sizeof(struct memcg_vmstats), 3756 GFP_KERNEL_ACCOUNT); 3757 if (!memcg->vmstats) 3758 goto fail; 3759 3760 memcg->vmstats_percpu = alloc_percpu_gfp(struct memcg_vmstats_percpu, 3761 GFP_KERNEL_ACCOUNT); 3762 if (!memcg->vmstats_percpu) 3763 goto fail; 3764 3765 if (!memcg1_alloc_events(memcg)) 3766 goto fail; 3767 3768 for_each_possible_cpu(cpu) { 3769 if (parent) 3770 pstatc_pcpu = parent->vmstats_percpu; 3771 statc = per_cpu_ptr(memcg->vmstats_percpu, cpu); 3772 statc->parent_pcpu = parent ? 
pstatc_pcpu : NULL; 3773 statc->vmstats = memcg->vmstats; 3774 } 3775 3776 for_each_node(node) 3777 if (!alloc_mem_cgroup_per_node_info(memcg, node)) 3778 goto fail; 3779 3780 if (memcg_wb_domain_init(memcg, GFP_KERNEL)) 3781 goto fail; 3782 3783 INIT_WORK(&memcg->high_work, high_work_func); 3784 vmpressure_init(&memcg->vmpressure); 3785 INIT_LIST_HEAD(&memcg->memory_peaks); 3786 INIT_LIST_HEAD(&memcg->swap_peaks); 3787 spin_lock_init(&memcg->peaks_lock); 3788 memcg->socket_pressure = get_jiffies_64(); 3789 #if BITS_PER_LONG < 64 3790 seqlock_init(&memcg->socket_pressure_seqlock); 3791 #endif 3792 memcg1_memcg_init(memcg); 3793 memcg->kmemcg_id = -1; 3794 INIT_LIST_HEAD(&memcg->objcg_list); 3795 #ifdef CONFIG_CGROUP_WRITEBACK 3796 INIT_LIST_HEAD(&memcg->cgwb_list); 3797 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) 3798 memcg->cgwb_frn[i].done = 3799 __WB_COMPLETION_INIT(&memcg_cgwb_frn_waitq); 3800 #endif 3801 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 3802 spin_lock_init(&memcg->deferred_split_queue.split_queue_lock); 3803 INIT_LIST_HEAD(&memcg->deferred_split_queue.split_queue); 3804 memcg->deferred_split_queue.split_queue_len = 0; 3805 #endif 3806 lru_gen_init_memcg(memcg); 3807 return memcg; 3808 fail: 3809 mem_cgroup_id_remove(memcg); 3810 __mem_cgroup_free(memcg); 3811 return ERR_PTR(error); 3812 } 3813 3814 static struct cgroup_subsys_state * __ref 3815 mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css) 3816 { 3817 struct mem_cgroup *parent = mem_cgroup_from_css(parent_css); 3818 struct mem_cgroup *memcg, *old_memcg; 3819 bool memcg_on_dfl = cgroup_subsys_on_dfl(memory_cgrp_subsys); 3820 3821 old_memcg = set_active_memcg(parent); 3822 memcg = mem_cgroup_alloc(parent); 3823 set_active_memcg(old_memcg); 3824 if (IS_ERR(memcg)) 3825 return ERR_CAST(memcg); 3826 3827 page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX); 3828 memcg1_soft_limit_reset(memcg); 3829 #ifdef CONFIG_ZSWAP 3830 memcg->zswap_max = PAGE_COUNTER_MAX; 3831 WRITE_ONCE(memcg->zswap_writeback, true); 3832 #endif 3833 page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX); 3834 if (parent) { 3835 WRITE_ONCE(memcg->swappiness, mem_cgroup_swappiness(parent)); 3836 3837 page_counter_init(&memcg->memory, &parent->memory, memcg_on_dfl); 3838 page_counter_init(&memcg->swap, &parent->swap, false); 3839 #ifdef CONFIG_MEMCG_V1 3840 memcg->memory.track_failcnt = !memcg_on_dfl; 3841 WRITE_ONCE(memcg->oom_kill_disable, READ_ONCE(parent->oom_kill_disable)); 3842 page_counter_init(&memcg->kmem, &parent->kmem, false); 3843 page_counter_init(&memcg->tcpmem, &parent->tcpmem, false); 3844 #endif 3845 } else { 3846 init_memcg_stats(); 3847 init_memcg_events(); 3848 page_counter_init(&memcg->memory, NULL, true); 3849 page_counter_init(&memcg->swap, NULL, false); 3850 #ifdef CONFIG_MEMCG_V1 3851 page_counter_init(&memcg->kmem, NULL, false); 3852 page_counter_init(&memcg->tcpmem, NULL, false); 3853 #endif 3854 root_mem_cgroup = memcg; 3855 return &memcg->css; 3856 } 3857 3858 if (memcg_on_dfl && !cgroup_memory_nosocket) 3859 static_branch_inc(&memcg_sockets_enabled_key); 3860 3861 if (!cgroup_memory_nobpf) 3862 static_branch_inc(&memcg_bpf_enabled_key); 3863 3864 return &memcg->css; 3865 } 3866 3867 static int mem_cgroup_css_online(struct cgroup_subsys_state *css) 3868 { 3869 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 3870 3871 if (memcg_online_kmem(memcg)) 3872 goto remove_id; 3873 3874 /* 3875 * A memcg must be visible for expand_shrinker_info() 3876 * by the time the maps are allocated. 
So, we allocate maps 3877 * here, when for_each_mem_cgroup() can't skip it. 3878 */ 3879 if (alloc_shrinker_info(memcg)) 3880 goto offline_kmem; 3881 3882 if (unlikely(mem_cgroup_is_root(memcg)) && !mem_cgroup_disabled()) 3883 queue_delayed_work(system_unbound_wq, &stats_flush_dwork, 3884 FLUSH_TIME); 3885 lru_gen_online_memcg(memcg); 3886 3887 /* Online state pins memcg ID, memcg ID pins CSS */ 3888 refcount_set(&memcg->id.ref, 1); 3889 css_get(css); 3890 3891 /* 3892 * Ensure mem_cgroup_from_id() works once we're fully online. 3893 * 3894 * We could do this earlier and require callers to filter with 3895 * css_tryget_online(). But right now there are no users that 3896 * need earlier access, and the workingset code relies on the 3897 * cgroup tree linkage (mem_cgroup_get_nr_swap_pages()). So 3898 * publish it here at the end of onlining. This matches the 3899 * regular ID destruction during offlining. 3900 */ 3901 xa_store(&mem_cgroup_ids, memcg->id.id, memcg, GFP_KERNEL); 3902 3903 return 0; 3904 offline_kmem: 3905 memcg_offline_kmem(memcg); 3906 remove_id: 3907 mem_cgroup_id_remove(memcg); 3908 return -ENOMEM; 3909 } 3910 3911 static void mem_cgroup_css_offline(struct cgroup_subsys_state *css) 3912 { 3913 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 3914 3915 memcg1_css_offline(memcg); 3916 3917 page_counter_set_min(&memcg->memory, 0); 3918 page_counter_set_low(&memcg->memory, 0); 3919 3920 zswap_memcg_offline_cleanup(memcg); 3921 3922 memcg_offline_kmem(memcg); 3923 reparent_shrinker_deferred(memcg); 3924 wb_memcg_offline(memcg); 3925 lru_gen_offline_memcg(memcg); 3926 3927 drain_all_stock(memcg); 3928 3929 mem_cgroup_id_put(memcg); 3930 } 3931 3932 static void mem_cgroup_css_released(struct cgroup_subsys_state *css) 3933 { 3934 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 3935 3936 invalidate_reclaim_iterators(memcg); 3937 lru_gen_release_memcg(memcg); 3938 } 3939 3940 static void mem_cgroup_css_free(struct cgroup_subsys_state *css) 3941 { 3942 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 3943 int __maybe_unused i; 3944 3945 #ifdef CONFIG_CGROUP_WRITEBACK 3946 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) 3947 wb_wait_for_completion(&memcg->cgwb_frn[i].done); 3948 #endif 3949 if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket) 3950 static_branch_dec(&memcg_sockets_enabled_key); 3951 3952 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg1_tcpmem_active(memcg)) 3953 static_branch_dec(&memcg_sockets_enabled_key); 3954 3955 if (!cgroup_memory_nobpf) 3956 static_branch_dec(&memcg_bpf_enabled_key); 3957 3958 vmpressure_cleanup(&memcg->vmpressure); 3959 cancel_work_sync(&memcg->high_work); 3960 memcg1_remove_from_trees(memcg); 3961 free_shrinker_info(memcg); 3962 mem_cgroup_free(memcg); 3963 } 3964 3965 /** 3966 * mem_cgroup_css_reset - reset the states of a mem_cgroup 3967 * @css: the target css 3968 * 3969 * Reset the states of the mem_cgroup associated with @css. This is 3970 * invoked when the userland requests disabling on the default hierarchy 3971 * but the memcg is pinned through dependency. The memcg should stop 3972 * applying policies and should revert to the vanilla state as it may be 3973 * made visible again. 3974 * 3975 * The current implementation only resets the essential configurations. 3976 * This needs to be expanded to cover all the visible parts. 
3977 */ 3978 static void mem_cgroup_css_reset(struct cgroup_subsys_state *css) 3979 { 3980 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 3981 3982 page_counter_set_max(&memcg->memory, PAGE_COUNTER_MAX); 3983 page_counter_set_max(&memcg->swap, PAGE_COUNTER_MAX); 3984 #ifdef CONFIG_MEMCG_V1 3985 page_counter_set_max(&memcg->kmem, PAGE_COUNTER_MAX); 3986 page_counter_set_max(&memcg->tcpmem, PAGE_COUNTER_MAX); 3987 #endif 3988 page_counter_set_min(&memcg->memory, 0); 3989 page_counter_set_low(&memcg->memory, 0); 3990 page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX); 3991 memcg1_soft_limit_reset(memcg); 3992 page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX); 3993 memcg_wb_domain_size_changed(memcg); 3994 } 3995 3996 struct aggregate_control { 3997 /* pointer to the aggregated (CPU and subtree aggregated) counters */ 3998 long *aggregate; 3999 /* pointer to the non-hierarchical (CPU aggregated) counters */ 4000 long *local; 4001 /* pointer to the pending child counters during tree propagation */ 4002 long *pending; 4003 /* pointer to the parent's pending counters, could be NULL */ 4004 long *ppending; 4005 /* pointer to the percpu counters to be aggregated */ 4006 long *cstat; 4007 /* pointer to the percpu counters of the last aggregation */ 4008 long *cstat_prev; 4009 /* size of the above counters */ 4010 int size; 4011 }; 4012 4013 static void mem_cgroup_stat_aggregate(struct aggregate_control *ac) 4014 { 4015 int i; 4016 long delta, delta_cpu, v; 4017 4018 for (i = 0; i < ac->size; i++) { 4019 /* 4020 * Collect the aggregated propagation counts of groups 4021 * below us. We're in a per-cpu loop here and this is 4022 * a global counter, so the first cycle will get them. 4023 */ 4024 delta = ac->pending[i]; 4025 if (delta) 4026 ac->pending[i] = 0; 4027 4028 /* Add CPU changes on this level since the last flush */ 4029 delta_cpu = 0; 4030 v = READ_ONCE(ac->cstat[i]); 4031 if (v != ac->cstat_prev[i]) { 4032 delta_cpu = v - ac->cstat_prev[i]; 4033 delta += delta_cpu; 4034 ac->cstat_prev[i] = v; 4035 } 4036 4037 /* Aggregate counts on this level and propagate upwards */ 4038 if (delta_cpu) 4039 ac->local[i] += delta_cpu; 4040 4041 if (delta) { 4042 ac->aggregate[i] += delta; 4043 if (ac->ppending) 4044 ac->ppending[i] += delta; 4045 } 4046 } 4047 } 4048 4049 #ifdef CONFIG_MEMCG_NMI_SAFETY_REQUIRES_ATOMIC 4050 static void flush_nmi_stats(struct mem_cgroup *memcg, struct mem_cgroup *parent, 4051 int cpu) 4052 { 4053 int nid; 4054 4055 if (atomic_read(&memcg->kmem_stat)) { 4056 int kmem = atomic_xchg(&memcg->kmem_stat, 0); 4057 int index = memcg_stats_index(MEMCG_KMEM); 4058 4059 memcg->vmstats->state[index] += kmem; 4060 if (parent) 4061 parent->vmstats->state_pending[index] += kmem; 4062 } 4063 4064 for_each_node_state(nid, N_MEMORY) { 4065 struct mem_cgroup_per_node *pn = memcg->nodeinfo[nid]; 4066 struct lruvec_stats *lstats = pn->lruvec_stats; 4067 struct lruvec_stats *plstats = NULL; 4068 4069 if (parent) 4070 plstats = parent->nodeinfo[nid]->lruvec_stats; 4071 4072 if (atomic_read(&pn->slab_reclaimable)) { 4073 int slab = atomic_xchg(&pn->slab_reclaimable, 0); 4074 int index = memcg_stats_index(NR_SLAB_RECLAIMABLE_B); 4075 4076 lstats->state[index] += slab; 4077 if (plstats) 4078 plstats->state_pending[index] += slab; 4079 } 4080 if (atomic_read(&pn->slab_unreclaimable)) { 4081 int slab = atomic_xchg(&pn->slab_unreclaimable, 0); 4082 int index = memcg_stats_index(NR_SLAB_UNRECLAIMABLE_B); 4083 4084 lstats->state[index] += slab; 4085 if (plstats) 4086
plstats->state_pending[index] += slab; 4087 } 4088 } 4089 } 4090 #else 4091 static void flush_nmi_stats(struct mem_cgroup *memcg, struct mem_cgroup *parent, 4092 int cpu) 4093 {} 4094 #endif 4095 4096 static void mem_cgroup_css_rstat_flush(struct cgroup_subsys_state *css, int cpu) 4097 { 4098 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 4099 struct mem_cgroup *parent = parent_mem_cgroup(memcg); 4100 struct memcg_vmstats_percpu *statc; 4101 struct aggregate_control ac; 4102 int nid; 4103 4104 flush_nmi_stats(memcg, parent, cpu); 4105 4106 statc = per_cpu_ptr(memcg->vmstats_percpu, cpu); 4107 4108 ac = (struct aggregate_control) { 4109 .aggregate = memcg->vmstats->state, 4110 .local = memcg->vmstats->state_local, 4111 .pending = memcg->vmstats->state_pending, 4112 .ppending = parent ? parent->vmstats->state_pending : NULL, 4113 .cstat = statc->state, 4114 .cstat_prev = statc->state_prev, 4115 .size = MEMCG_VMSTAT_SIZE, 4116 }; 4117 mem_cgroup_stat_aggregate(&ac); 4118 4119 ac = (struct aggregate_control) { 4120 .aggregate = memcg->vmstats->events, 4121 .local = memcg->vmstats->events_local, 4122 .pending = memcg->vmstats->events_pending, 4123 .ppending = parent ? parent->vmstats->events_pending : NULL, 4124 .cstat = statc->events, 4125 .cstat_prev = statc->events_prev, 4126 .size = NR_MEMCG_EVENTS, 4127 }; 4128 mem_cgroup_stat_aggregate(&ac); 4129 4130 for_each_node_state(nid, N_MEMORY) { 4131 struct mem_cgroup_per_node *pn = memcg->nodeinfo[nid]; 4132 struct lruvec_stats *lstats = pn->lruvec_stats; 4133 struct lruvec_stats *plstats = NULL; 4134 struct lruvec_stats_percpu *lstatc; 4135 4136 if (parent) 4137 plstats = parent->nodeinfo[nid]->lruvec_stats; 4138 4139 lstatc = per_cpu_ptr(pn->lruvec_stats_percpu, cpu); 4140 4141 ac = (struct aggregate_control) { 4142 .aggregate = lstats->state, 4143 .local = lstats->state_local, 4144 .pending = lstats->state_pending, 4145 .ppending = plstats ? plstats->state_pending : NULL, 4146 .cstat = lstatc->state, 4147 .cstat_prev = lstatc->state_prev, 4148 .size = NR_MEMCG_NODE_STAT_ITEMS, 4149 }; 4150 mem_cgroup_stat_aggregate(&ac); 4151 4152 } 4153 WRITE_ONCE(statc->stats_updates, 0); 4154 /* We are in a per-cpu loop here, only do the atomic write once */ 4155 if (atomic_read(&memcg->vmstats->stats_updates)) 4156 atomic_set(&memcg->vmstats->stats_updates, 0); 4157 } 4158 4159 static void mem_cgroup_fork(struct task_struct *task) 4160 { 4161 /* 4162 * Set the update flag to cause task->objcg to be initialized lazily 4163 * on the first allocation. It can be done without any synchronization 4164 * because it's always performed on the current task, so does 4165 * current_objcg_update(). 4166 */ 4167 task->objcg = (struct obj_cgroup *)CURRENT_OBJCG_UPDATE_FLAG; 4168 } 4169 4170 static void mem_cgroup_exit(struct task_struct *task) 4171 { 4172 struct obj_cgroup *objcg = task->objcg; 4173 4174 objcg = (struct obj_cgroup *) 4175 ((unsigned long)objcg & ~CURRENT_OBJCG_UPDATE_FLAG); 4176 obj_cgroup_put(objcg); 4177 4178 /* 4179 * Some kernel allocations can happen after this point, 4180 * but let's ignore them. It can be done without any synchronization 4181 * because it's always performed on the current task, so does 4182 * current_objcg_update(). 
4183 */ 4184 task->objcg = NULL; 4185 } 4186 4187 #ifdef CONFIG_LRU_GEN 4188 static void mem_cgroup_lru_gen_attach(struct cgroup_taskset *tset) 4189 { 4190 struct task_struct *task; 4191 struct cgroup_subsys_state *css; 4192 4193 /* find the first leader if there is any */ 4194 cgroup_taskset_for_each_leader(task, css, tset) 4195 break; 4196 4197 if (!task) 4198 return; 4199 4200 task_lock(task); 4201 if (task->mm && READ_ONCE(task->mm->owner) == task) 4202 lru_gen_migrate_mm(task->mm); 4203 task_unlock(task); 4204 } 4205 #else 4206 static void mem_cgroup_lru_gen_attach(struct cgroup_taskset *tset) {} 4207 #endif /* CONFIG_LRU_GEN */ 4208 4209 static void mem_cgroup_kmem_attach(struct cgroup_taskset *tset) 4210 { 4211 struct task_struct *task; 4212 struct cgroup_subsys_state *css; 4213 4214 cgroup_taskset_for_each(task, css, tset) { 4215 /* atomically set the update bit */ 4216 set_bit(CURRENT_OBJCG_UPDATE_BIT, (unsigned long *)&task->objcg); 4217 } 4218 } 4219 4220 static void mem_cgroup_attach(struct cgroup_taskset *tset) 4221 { 4222 mem_cgroup_lru_gen_attach(tset); 4223 mem_cgroup_kmem_attach(tset); 4224 } 4225 4226 static int seq_puts_memcg_tunable(struct seq_file *m, unsigned long value) 4227 { 4228 if (value == PAGE_COUNTER_MAX) 4229 seq_puts(m, "max\n"); 4230 else 4231 seq_printf(m, "%llu\n", (u64)value * PAGE_SIZE); 4232 4233 return 0; 4234 } 4235 4236 static u64 memory_current_read(struct cgroup_subsys_state *css, 4237 struct cftype *cft) 4238 { 4239 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 4240 4241 return (u64)page_counter_read(&memcg->memory) * PAGE_SIZE; 4242 } 4243 4244 #define OFP_PEAK_UNSET (((-1UL))) 4245 4246 static int peak_show(struct seq_file *sf, void *v, struct page_counter *pc) 4247 { 4248 struct cgroup_of_peak *ofp = of_peak(sf->private); 4249 u64 fd_peak = READ_ONCE(ofp->value), peak; 4250 4251 /* User wants global or local peak? 
*/ 4252 if (fd_peak == OFP_PEAK_UNSET) 4253 peak = pc->watermark; 4254 else 4255 peak = max(fd_peak, READ_ONCE(pc->local_watermark)); 4256 4257 seq_printf(sf, "%llu\n", peak * PAGE_SIZE); 4258 return 0; 4259 } 4260 4261 static int memory_peak_show(struct seq_file *sf, void *v) 4262 { 4263 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(sf)); 4264 4265 return peak_show(sf, v, &memcg->memory); 4266 } 4267 4268 static int peak_open(struct kernfs_open_file *of) 4269 { 4270 struct cgroup_of_peak *ofp = of_peak(of); 4271 4272 ofp->value = OFP_PEAK_UNSET; 4273 return 0; 4274 } 4275 4276 static void peak_release(struct kernfs_open_file *of) 4277 { 4278 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 4279 struct cgroup_of_peak *ofp = of_peak(of); 4280 4281 if (ofp->value == OFP_PEAK_UNSET) { 4282 /* fast path (no writes on this fd) */ 4283 return; 4284 } 4285 spin_lock(&memcg->peaks_lock); 4286 list_del(&ofp->list); 4287 spin_unlock(&memcg->peaks_lock); 4288 } 4289 4290 static ssize_t peak_write(struct kernfs_open_file *of, char *buf, size_t nbytes, 4291 loff_t off, struct page_counter *pc, 4292 struct list_head *watchers) 4293 { 4294 unsigned long usage; 4295 struct cgroup_of_peak *peer_ctx; 4296 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 4297 struct cgroup_of_peak *ofp = of_peak(of); 4298 4299 spin_lock(&memcg->peaks_lock); 4300 4301 usage = page_counter_read(pc); 4302 WRITE_ONCE(pc->local_watermark, usage); 4303 4304 list_for_each_entry(peer_ctx, watchers, list) 4305 if (usage > peer_ctx->value) 4306 WRITE_ONCE(peer_ctx->value, usage); 4307 4308 /* initial write, register watcher */ 4309 if (ofp->value == OFP_PEAK_UNSET) 4310 list_add(&ofp->list, watchers); 4311 4312 WRITE_ONCE(ofp->value, usage); 4313 spin_unlock(&memcg->peaks_lock); 4314 4315 return nbytes; 4316 } 4317 4318 static ssize_t memory_peak_write(struct kernfs_open_file *of, char *buf, 4319 size_t nbytes, loff_t off) 4320 { 4321 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 4322 4323 return peak_write(of, buf, nbytes, off, &memcg->memory, 4324 &memcg->memory_peaks); 4325 } 4326 4327 #undef OFP_PEAK_UNSET 4328 4329 static int memory_min_show(struct seq_file *m, void *v) 4330 { 4331 return seq_puts_memcg_tunable(m, 4332 READ_ONCE(mem_cgroup_from_seq(m)->memory.min)); 4333 } 4334 4335 static ssize_t memory_min_write(struct kernfs_open_file *of, 4336 char *buf, size_t nbytes, loff_t off) 4337 { 4338 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 4339 unsigned long min; 4340 int err; 4341 4342 buf = strstrip(buf); 4343 err = page_counter_memparse(buf, "max", &min); 4344 if (err) 4345 return err; 4346 4347 page_counter_set_min(&memcg->memory, min); 4348 4349 return nbytes; 4350 } 4351 4352 static int memory_low_show(struct seq_file *m, void *v) 4353 { 4354 return seq_puts_memcg_tunable(m, 4355 READ_ONCE(mem_cgroup_from_seq(m)->memory.low)); 4356 } 4357 4358 static ssize_t memory_low_write(struct kernfs_open_file *of, 4359 char *buf, size_t nbytes, loff_t off) 4360 { 4361 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 4362 unsigned long low; 4363 int err; 4364 4365 buf = strstrip(buf); 4366 err = page_counter_memparse(buf, "max", &low); 4367 if (err) 4368 return err; 4369 4370 page_counter_set_low(&memcg->memory, low); 4371 4372 return nbytes; 4373 } 4374 4375 static int memory_high_show(struct seq_file *m, void *v) 4376 { 4377 return seq_puts_memcg_tunable(m, 4378 READ_ONCE(mem_cgroup_from_seq(m)->memory.high)); 4379 } 4380 4381 static ssize_t 
memory_high_write(struct kernfs_open_file *of, 4382 char *buf, size_t nbytes, loff_t off) 4383 { 4384 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 4385 unsigned int nr_retries = MAX_RECLAIM_RETRIES; 4386 bool drained = false; 4387 unsigned long high; 4388 int err; 4389 4390 buf = strstrip(buf); 4391 err = page_counter_memparse(buf, "max", &high); 4392 if (err) 4393 return err; 4394 4395 page_counter_set_high(&memcg->memory, high); 4396 4397 if (of->file->f_flags & O_NONBLOCK) 4398 goto out; 4399 4400 for (;;) { 4401 unsigned long nr_pages = page_counter_read(&memcg->memory); 4402 unsigned long reclaimed; 4403 4404 if (nr_pages <= high) 4405 break; 4406 4407 if (signal_pending(current)) 4408 break; 4409 4410 if (!drained) { 4411 drain_all_stock(memcg); 4412 drained = true; 4413 continue; 4414 } 4415 4416 reclaimed = try_to_free_mem_cgroup_pages(memcg, nr_pages - high, 4417 GFP_KERNEL, MEMCG_RECLAIM_MAY_SWAP, NULL); 4418 4419 if (!reclaimed && !nr_retries--) 4420 break; 4421 } 4422 out: 4423 memcg_wb_domain_size_changed(memcg); 4424 return nbytes; 4425 } 4426 4427 static int memory_max_show(struct seq_file *m, void *v) 4428 { 4429 return seq_puts_memcg_tunable(m, 4430 READ_ONCE(mem_cgroup_from_seq(m)->memory.max)); 4431 } 4432 4433 static ssize_t memory_max_write(struct kernfs_open_file *of, 4434 char *buf, size_t nbytes, loff_t off) 4435 { 4436 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 4437 unsigned int nr_reclaims = MAX_RECLAIM_RETRIES; 4438 bool drained = false; 4439 unsigned long max; 4440 int err; 4441 4442 buf = strstrip(buf); 4443 err = page_counter_memparse(buf, "max", &max); 4444 if (err) 4445 return err; 4446 4447 xchg(&memcg->memory.max, max); 4448 4449 if (of->file->f_flags & O_NONBLOCK) 4450 goto out; 4451 4452 for (;;) { 4453 unsigned long nr_pages = page_counter_read(&memcg->memory); 4454 4455 if (nr_pages <= max) 4456 break; 4457 4458 if (signal_pending(current)) 4459 break; 4460 4461 if (!drained) { 4462 drain_all_stock(memcg); 4463 drained = true; 4464 continue; 4465 } 4466 4467 if (nr_reclaims) { 4468 if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max, 4469 GFP_KERNEL, MEMCG_RECLAIM_MAY_SWAP, NULL)) 4470 nr_reclaims--; 4471 continue; 4472 } 4473 4474 memcg_memory_event(memcg, MEMCG_OOM); 4475 if (!mem_cgroup_out_of_memory(memcg, GFP_KERNEL, 0)) 4476 break; 4477 cond_resched(); 4478 } 4479 out: 4480 memcg_wb_domain_size_changed(memcg); 4481 return nbytes; 4482 } 4483 4484 /* 4485 * Note: don't forget to update the 'samples/cgroup/memcg_event_listener' 4486 * if any new events become available. 
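 *
 * For reference, a sketch of what reading memory.events produces, with the
 * field names coming from __memory_events_show() below (the counts shown
 * here are purely illustrative):
 *
 *	low 0
 *	high 12
 *	max 3
 *	oom 1
 *	oom_kill 1
 *	oom_group_kill 0
 *	sock_throttled 0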
4487 */ 4488 static void __memory_events_show(struct seq_file *m, atomic_long_t *events) 4489 { 4490 seq_printf(m, "low %lu\n", atomic_long_read(&events[MEMCG_LOW])); 4491 seq_printf(m, "high %lu\n", atomic_long_read(&events[MEMCG_HIGH])); 4492 seq_printf(m, "max %lu\n", atomic_long_read(&events[MEMCG_MAX])); 4493 seq_printf(m, "oom %lu\n", atomic_long_read(&events[MEMCG_OOM])); 4494 seq_printf(m, "oom_kill %lu\n", 4495 atomic_long_read(&events[MEMCG_OOM_KILL])); 4496 seq_printf(m, "oom_group_kill %lu\n", 4497 atomic_long_read(&events[MEMCG_OOM_GROUP_KILL])); 4498 seq_printf(m, "sock_throttled %lu\n", 4499 atomic_long_read(&events[MEMCG_SOCK_THROTTLED])); 4500 } 4501 4502 static int memory_events_show(struct seq_file *m, void *v) 4503 { 4504 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 4505 4506 __memory_events_show(m, memcg->memory_events); 4507 return 0; 4508 } 4509 4510 static int memory_events_local_show(struct seq_file *m, void *v) 4511 { 4512 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 4513 4514 __memory_events_show(m, memcg->memory_events_local); 4515 return 0; 4516 } 4517 4518 int memory_stat_show(struct seq_file *m, void *v) 4519 { 4520 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 4521 char *buf = kmalloc(SEQ_BUF_SIZE, GFP_KERNEL); 4522 struct seq_buf s; 4523 4524 if (!buf) 4525 return -ENOMEM; 4526 seq_buf_init(&s, buf, SEQ_BUF_SIZE); 4527 memory_stat_format(memcg, &s); 4528 seq_puts(m, buf); 4529 kfree(buf); 4530 return 0; 4531 } 4532 4533 #ifdef CONFIG_NUMA 4534 static inline unsigned long lruvec_page_state_output(struct lruvec *lruvec, 4535 int item) 4536 { 4537 return lruvec_page_state(lruvec, item) * 4538 memcg_page_state_output_unit(item); 4539 } 4540 4541 static int memory_numa_stat_show(struct seq_file *m, void *v) 4542 { 4543 int i; 4544 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 4545 4546 mem_cgroup_flush_stats(memcg); 4547 4548 for (i = 0; i < ARRAY_SIZE(memory_stats); i++) { 4549 int nid; 4550 4551 if (memory_stats[i].idx >= NR_VM_NODE_STAT_ITEMS) 4552 continue; 4553 4554 seq_printf(m, "%s", memory_stats[i].name); 4555 for_each_node_state(nid, N_MEMORY) { 4556 u64 size; 4557 struct lruvec *lruvec; 4558 4559 lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid)); 4560 size = lruvec_page_state_output(lruvec, 4561 memory_stats[i].idx); 4562 seq_printf(m, " N%d=%llu", nid, size); 4563 } 4564 seq_putc(m, '\n'); 4565 } 4566 4567 return 0; 4568 } 4569 #endif 4570 4571 static int memory_oom_group_show(struct seq_file *m, void *v) 4572 { 4573 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 4574 4575 seq_printf(m, "%d\n", READ_ONCE(memcg->oom_group)); 4576 4577 return 0; 4578 } 4579 4580 static ssize_t memory_oom_group_write(struct kernfs_open_file *of, 4581 char *buf, size_t nbytes, loff_t off) 4582 { 4583 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 4584 int ret, oom_group; 4585 4586 buf = strstrip(buf); 4587 if (!buf) 4588 return -EINVAL; 4589 4590 ret = kstrtoint(buf, 0, &oom_group); 4591 if (ret) 4592 return ret; 4593 4594 if (oom_group != 0 && oom_group != 1) 4595 return -EINVAL; 4596 4597 WRITE_ONCE(memcg->oom_group, oom_group); 4598 4599 return nbytes; 4600 } 4601 4602 static ssize_t memory_reclaim(struct kernfs_open_file *of, char *buf, 4603 size_t nbytes, loff_t off) 4604 { 4605 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 4606 int ret; 4607 4608 ret = user_proactive_reclaim(buf, memcg, NULL); 4609 if (ret) 4610 return ret; 4611 4612 return nbytes; 4613 } 4614 4615 static struct cftype memory_files[] = { 4616 { 
4617 .name = "current", 4618 .flags = CFTYPE_NOT_ON_ROOT, 4619 .read_u64 = memory_current_read, 4620 }, 4621 { 4622 .name = "peak", 4623 .flags = CFTYPE_NOT_ON_ROOT, 4624 .open = peak_open, 4625 .release = peak_release, 4626 .seq_show = memory_peak_show, 4627 .write = memory_peak_write, 4628 }, 4629 { 4630 .name = "min", 4631 .flags = CFTYPE_NOT_ON_ROOT, 4632 .seq_show = memory_min_show, 4633 .write = memory_min_write, 4634 }, 4635 { 4636 .name = "low", 4637 .flags = CFTYPE_NOT_ON_ROOT, 4638 .seq_show = memory_low_show, 4639 .write = memory_low_write, 4640 }, 4641 { 4642 .name = "high", 4643 .flags = CFTYPE_NOT_ON_ROOT, 4644 .seq_show = memory_high_show, 4645 .write = memory_high_write, 4646 }, 4647 { 4648 .name = "max", 4649 .flags = CFTYPE_NOT_ON_ROOT, 4650 .seq_show = memory_max_show, 4651 .write = memory_max_write, 4652 }, 4653 { 4654 .name = "events", 4655 .flags = CFTYPE_NOT_ON_ROOT, 4656 .file_offset = offsetof(struct mem_cgroup, events_file), 4657 .seq_show = memory_events_show, 4658 }, 4659 { 4660 .name = "events.local", 4661 .flags = CFTYPE_NOT_ON_ROOT, 4662 .file_offset = offsetof(struct mem_cgroup, events_local_file), 4663 .seq_show = memory_events_local_show, 4664 }, 4665 { 4666 .name = "stat", 4667 .seq_show = memory_stat_show, 4668 }, 4669 #ifdef CONFIG_NUMA 4670 { 4671 .name = "numa_stat", 4672 .seq_show = memory_numa_stat_show, 4673 }, 4674 #endif 4675 { 4676 .name = "oom.group", 4677 .flags = CFTYPE_NOT_ON_ROOT | CFTYPE_NS_DELEGATABLE, 4678 .seq_show = memory_oom_group_show, 4679 .write = memory_oom_group_write, 4680 }, 4681 { 4682 .name = "reclaim", 4683 .flags = CFTYPE_NS_DELEGATABLE, 4684 .write = memory_reclaim, 4685 }, 4686 { } /* terminate */ 4687 }; 4688 4689 struct cgroup_subsys memory_cgrp_subsys = { 4690 .css_alloc = mem_cgroup_css_alloc, 4691 .css_online = mem_cgroup_css_online, 4692 .css_offline = mem_cgroup_css_offline, 4693 .css_released = mem_cgroup_css_released, 4694 .css_free = mem_cgroup_css_free, 4695 .css_reset = mem_cgroup_css_reset, 4696 .css_rstat_flush = mem_cgroup_css_rstat_flush, 4697 .attach = mem_cgroup_attach, 4698 .fork = mem_cgroup_fork, 4699 .exit = mem_cgroup_exit, 4700 .dfl_cftypes = memory_files, 4701 #ifdef CONFIG_MEMCG_V1 4702 .legacy_cftypes = mem_cgroup_legacy_files, 4703 #endif 4704 .early_init = 0, 4705 }; 4706 4707 /** 4708 * mem_cgroup_calculate_protection - check if memory consumption is in the normal range 4709 * @root: the top ancestor of the sub-tree being checked 4710 * @memcg: the memory cgroup to check 4711 * 4712 * WARNING: This function is not stateless! It can only be used as part 4713 * of a top-down tree iteration, not for isolated queries. 
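 *
 * A minimal usage sketch (hypothetical caller, names approximate; reclaim
 * walks the subtree top-down with mem_cgroup_iter() in roughly this shape):
 *
 *	struct mem_cgroup *iter = NULL;
 *
 *	while ((iter = mem_cgroup_iter(root, iter, NULL))) {
 *		mem_cgroup_calculate_protection(root, iter);
 *		if (mem_cgroup_below_min(root, iter))
 *			continue;	// fully protected, skip
 *		... reclaim from iter ...
 *	}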
4714 */ 4715 void mem_cgroup_calculate_protection(struct mem_cgroup *root, 4716 struct mem_cgroup *memcg) 4717 { 4718 bool recursive_protection = 4719 cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_RECURSIVE_PROT; 4720 4721 if (mem_cgroup_disabled()) 4722 return; 4723 4724 if (!root) 4725 root = root_mem_cgroup; 4726 4727 page_counter_calculate_protection(&root->memory, &memcg->memory, recursive_protection); 4728 } 4729 4730 static int charge_memcg(struct folio *folio, struct mem_cgroup *memcg, 4731 gfp_t gfp) 4732 { 4733 int ret; 4734 4735 ret = try_charge(memcg, gfp, folio_nr_pages(folio)); 4736 if (ret) 4737 goto out; 4738 4739 css_get(&memcg->css); 4740 commit_charge(folio, memcg); 4741 memcg1_commit_charge(folio, memcg); 4742 out: 4743 return ret; 4744 } 4745 4746 int __mem_cgroup_charge(struct folio *folio, struct mm_struct *mm, gfp_t gfp) 4747 { 4748 struct mem_cgroup *memcg; 4749 int ret; 4750 4751 memcg = get_mem_cgroup_from_mm(mm); 4752 ret = charge_memcg(folio, memcg, gfp); 4753 css_put(&memcg->css); 4754 4755 return ret; 4756 } 4757 4758 /** 4759 * mem_cgroup_charge_hugetlb - charge the memcg for a hugetlb folio 4760 * @folio: folio being charged 4761 * @gfp: reclaim mode 4762 * 4763 * This function is called when allocating a huge page folio, after the page has 4764 * already been obtained and charged to the appropriate hugetlb cgroup 4765 * controller (if it is enabled). 4766 * 4767 * Returns -ENOMEM if the memcg is already full. 4768 * Returns 0 if the charge was successful, or if charging was skipped. 4769 */ 4770 int mem_cgroup_charge_hugetlb(struct folio *folio, gfp_t gfp) 4771 { 4772 struct mem_cgroup *memcg = get_mem_cgroup_from_current(); 4773 int ret = 0; 4774 4775 /* 4776 * Even if memcg does not account for hugetlb, we still want to update 4777 * system-level stats via lruvec_stat_mod_folio. Return 0, and skip 4778 * charging the memcg. 4779 */ 4780 if (mem_cgroup_disabled() || !memcg_accounts_hugetlb() || 4781 !memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys)) 4782 goto out; 4783 4784 if (charge_memcg(folio, memcg, gfp)) 4785 ret = -ENOMEM; 4786 4787 out: 4788 mem_cgroup_put(memcg); 4789 return ret; 4790 } 4791 4792 /** 4793 * mem_cgroup_swapin_charge_folio - Charge a newly allocated folio for swapin. 4794 * @folio: folio to charge. 4795 * @mm: mm context of the victim 4796 * @gfp: reclaim mode 4797 * @entry: swap entry for which the folio is allocated 4798 * 4799 * This function charges a folio allocated for swapin. Please call this before 4800 * adding the folio to the swapcache. 4801 * 4802 * Returns 0 on success. Otherwise, an error code is returned.
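 *
 * A rough ordering sketch (hypothetical swapin path; error handling,
 * locking and the actual swapcache/readahead details are trimmed):
 *
 *	folio = folio_alloc(gfp, 0);
 *	if (mem_cgroup_swapin_charge_folio(folio, vma->vm_mm, gfp, entry))
 *		goto out_free_folio;
 *	... add the folio to the swapcache, then read the data in ...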
4803 */ 4804 int mem_cgroup_swapin_charge_folio(struct folio *folio, struct mm_struct *mm, 4805 gfp_t gfp, swp_entry_t entry) 4806 { 4807 struct mem_cgroup *memcg; 4808 unsigned short id; 4809 int ret; 4810 4811 if (mem_cgroup_disabled()) 4812 return 0; 4813 4814 id = lookup_swap_cgroup_id(entry); 4815 rcu_read_lock(); 4816 memcg = mem_cgroup_from_id(id); 4817 if (!memcg || !css_tryget_online(&memcg->css)) 4818 memcg = get_mem_cgroup_from_mm(mm); 4819 rcu_read_unlock(); 4820 4821 ret = charge_memcg(folio, memcg, gfp); 4822 4823 css_put(&memcg->css); 4824 return ret; 4825 } 4826 4827 struct uncharge_gather { 4828 struct mem_cgroup *memcg; 4829 unsigned long nr_memory; 4830 unsigned long pgpgout; 4831 unsigned long nr_kmem; 4832 int nid; 4833 }; 4834 4835 static inline void uncharge_gather_clear(struct uncharge_gather *ug) 4836 { 4837 memset(ug, 0, sizeof(*ug)); 4838 } 4839 4840 static void uncharge_batch(const struct uncharge_gather *ug) 4841 { 4842 if (ug->nr_memory) { 4843 memcg_uncharge(ug->memcg, ug->nr_memory); 4844 if (ug->nr_kmem) { 4845 mod_memcg_state(ug->memcg, MEMCG_KMEM, -ug->nr_kmem); 4846 memcg1_account_kmem(ug->memcg, -ug->nr_kmem); 4847 } 4848 memcg1_oom_recover(ug->memcg); 4849 } 4850 4851 memcg1_uncharge_batch(ug->memcg, ug->pgpgout, ug->nr_memory, ug->nid); 4852 4853 /* drop reference from uncharge_folio */ 4854 css_put(&ug->memcg->css); 4855 } 4856 4857 static void uncharge_folio(struct folio *folio, struct uncharge_gather *ug) 4858 { 4859 long nr_pages; 4860 struct mem_cgroup *memcg; 4861 struct obj_cgroup *objcg; 4862 4863 VM_BUG_ON_FOLIO(folio_test_lru(folio), folio); 4864 4865 /* 4866 * Nobody should be changing or seriously looking at 4867 * folio memcg or objcg at this point, we have fully 4868 * exclusive access to the folio. 4869 */ 4870 if (folio_memcg_kmem(folio)) { 4871 objcg = __folio_objcg(folio); 4872 /* 4873 * This get matches the put at the end of the function and 4874 * kmem pages do not hold memcg references anymore. 
4875 */ 4876 memcg = get_mem_cgroup_from_objcg(objcg); 4877 } else { 4878 memcg = __folio_memcg(folio); 4879 } 4880 4881 if (!memcg) 4882 return; 4883 4884 if (ug->memcg != memcg) { 4885 if (ug->memcg) { 4886 uncharge_batch(ug); 4887 uncharge_gather_clear(ug); 4888 } 4889 ug->memcg = memcg; 4890 ug->nid = folio_nid(folio); 4891 4892 /* pairs with css_put in uncharge_batch */ 4893 css_get(&memcg->css); 4894 } 4895 4896 nr_pages = folio_nr_pages(folio); 4897 4898 if (folio_memcg_kmem(folio)) { 4899 ug->nr_memory += nr_pages; 4900 ug->nr_kmem += nr_pages; 4901 4902 folio->memcg_data = 0; 4903 obj_cgroup_put(objcg); 4904 } else { 4905 /* LRU pages aren't accounted at the root level */ 4906 if (!mem_cgroup_is_root(memcg)) 4907 ug->nr_memory += nr_pages; 4908 ug->pgpgout++; 4909 4910 WARN_ON_ONCE(folio_unqueue_deferred_split(folio)); 4911 folio->memcg_data = 0; 4912 } 4913 4914 css_put(&memcg->css); 4915 } 4916 4917 void __mem_cgroup_uncharge(struct folio *folio) 4918 { 4919 struct uncharge_gather ug; 4920 4921 /* Don't touch folio->lru of any random page, pre-check: */ 4922 if (!folio_memcg_charged(folio)) 4923 return; 4924 4925 uncharge_gather_clear(&ug); 4926 uncharge_folio(folio, &ug); 4927 uncharge_batch(&ug); 4928 } 4929 4930 void __mem_cgroup_uncharge_folios(struct folio_batch *folios) 4931 { 4932 struct uncharge_gather ug; 4933 unsigned int i; 4934 4935 uncharge_gather_clear(&ug); 4936 for (i = 0; i < folios->nr; i++) 4937 uncharge_folio(folios->folios[i], &ug); 4938 if (ug.memcg) 4939 uncharge_batch(&ug); 4940 } 4941 4942 /** 4943 * mem_cgroup_replace_folio - Charge a folio's replacement. 4944 * @old: Currently circulating folio. 4945 * @new: Replacement folio. 4946 * 4947 * Charge @new as a replacement folio for @old. @old will 4948 * be uncharged upon free. 4949 * 4950 * Both folios must be locked, @new->mapping must be set up. 4951 */ 4952 void mem_cgroup_replace_folio(struct folio *old, struct folio *new) 4953 { 4954 struct mem_cgroup *memcg; 4955 long nr_pages = folio_nr_pages(new); 4956 4957 VM_BUG_ON_FOLIO(!folio_test_locked(old), old); 4958 VM_BUG_ON_FOLIO(!folio_test_locked(new), new); 4959 VM_BUG_ON_FOLIO(folio_test_anon(old) != folio_test_anon(new), new); 4960 VM_BUG_ON_FOLIO(folio_nr_pages(old) != nr_pages, new); 4961 4962 if (mem_cgroup_disabled()) 4963 return; 4964 4965 /* Page cache replacement: new folio already charged? */ 4966 if (folio_memcg_charged(new)) 4967 return; 4968 4969 memcg = folio_memcg(old); 4970 VM_WARN_ON_ONCE_FOLIO(!memcg, old); 4971 if (!memcg) 4972 return; 4973 4974 /* Force-charge the new page. The old one will be freed soon */ 4975 if (!mem_cgroup_is_root(memcg)) { 4976 page_counter_charge(&memcg->memory, nr_pages); 4977 if (do_memsw_account()) 4978 page_counter_charge(&memcg->memsw, nr_pages); 4979 } 4980 4981 css_get(&memcg->css); 4982 commit_charge(new, memcg); 4983 memcg1_commit_charge(new, memcg); 4984 } 4985 4986 /** 4987 * mem_cgroup_migrate - Transfer the memcg data from the old to the new folio. 4988 * @old: Currently circulating folio. 4989 * @new: Replacement folio. 4990 * 4991 * Transfer the memcg data from the old folio to the new folio for migration. 4992 * The old folio's data info will be cleared. Note that the memory counters 4993 * will remain unchanged throughout the process. 4994 * 4995 * Both folios must be locked, @new->mapping must be set up. 
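 *
 * A condensed ordering sketch (hypothetical caller; the real user is the
 * folio migration code, most steps elided):
 *
 *	// old and new are locked, new->mapping is already set
 *	mem_cgroup_migrate(old, new);
 *	... copy contents and flags, remap users, then release old ...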
4996 */ 4997 void mem_cgroup_migrate(struct folio *old, struct folio *new) 4998 { 4999 struct mem_cgroup *memcg; 5000 5001 VM_BUG_ON_FOLIO(!folio_test_locked(old), old); 5002 VM_BUG_ON_FOLIO(!folio_test_locked(new), new); 5003 VM_BUG_ON_FOLIO(folio_test_anon(old) != folio_test_anon(new), new); 5004 VM_BUG_ON_FOLIO(folio_nr_pages(old) != folio_nr_pages(new), new); 5005 VM_BUG_ON_FOLIO(folio_test_lru(old), old); 5006 5007 if (mem_cgroup_disabled()) 5008 return; 5009 5010 memcg = folio_memcg(old); 5011 /* 5012 * Note that it is normal to see !memcg for a hugetlb folio. 5013 * E.g., it could have been allocated when memory_hugetlb_accounting 5014 * was not selected. 5015 */ 5016 VM_WARN_ON_ONCE_FOLIO(!folio_test_hugetlb(old) && !memcg, old); 5017 if (!memcg) 5018 return; 5019 5020 /* Transfer the charge and the css ref */ 5021 commit_charge(new, memcg); 5022 5023 /* Warning should never happen, so don't worry about refcount non-0 */ 5024 WARN_ON_ONCE(folio_unqueue_deferred_split(old)); 5025 old->memcg_data = 0; 5026 } 5027 5028 DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key); 5029 EXPORT_SYMBOL(memcg_sockets_enabled_key); 5030 5031 void mem_cgroup_sk_alloc(struct sock *sk) 5032 { 5033 struct mem_cgroup *memcg; 5034 5035 if (!mem_cgroup_sockets_enabled) 5036 return; 5037 5038 /* Do not associate the sock with unrelated interrupted task's memcg. */ 5039 if (!in_task()) 5040 return; 5041 5042 rcu_read_lock(); 5043 memcg = mem_cgroup_from_task(current); 5044 if (mem_cgroup_is_root(memcg)) 5045 goto out; 5046 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg1_tcpmem_active(memcg)) 5047 goto out; 5048 if (css_tryget(&memcg->css)) 5049 sk->sk_memcg = memcg; 5050 out: 5051 rcu_read_unlock(); 5052 } 5053 5054 void mem_cgroup_sk_free(struct sock *sk) 5055 { 5056 struct mem_cgroup *memcg = mem_cgroup_from_sk(sk); 5057 5058 if (memcg) 5059 css_put(&memcg->css); 5060 } 5061 5062 void mem_cgroup_sk_inherit(const struct sock *sk, struct sock *newsk) 5063 { 5064 struct mem_cgroup *memcg; 5065 5066 if (sk->sk_memcg == newsk->sk_memcg) 5067 return; 5068 5069 mem_cgroup_sk_free(newsk); 5070 5071 memcg = mem_cgroup_from_sk(sk); 5072 if (memcg) 5073 css_get(&memcg->css); 5074 5075 newsk->sk_memcg = sk->sk_memcg; 5076 } 5077 5078 /** 5079 * mem_cgroup_sk_charge - charge socket memory 5080 * @sk: socket in memcg to charge 5081 * @nr_pages: number of pages to charge 5082 * @gfp_mask: reclaim mode 5083 * 5084 * Charges @nr_pages to the memcg of @sk. Returns %true if the charge fit within 5085 * the memcg's configured limit, %false if it doesn't.
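 *
 * A hedged usage sketch (hypothetical networking caller; the real users live
 * in the socket memory accounting code, sk_mem bookkeeping omitted):
 *
 *	if (mem_cgroup_sockets_enabled && mem_cgroup_from_sk(sk) &&
 *	    !mem_cgroup_sk_charge(sk, nr_pages, gfp_mask))
 *		goto suppress_allocation;
 *	...
 *	mem_cgroup_sk_uncharge(sk, nr_pages);	// on release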
5086 */ 5087 bool mem_cgroup_sk_charge(const struct sock *sk, unsigned int nr_pages, 5088 gfp_t gfp_mask) 5089 { 5090 struct mem_cgroup *memcg = mem_cgroup_from_sk(sk); 5091 5092 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) 5093 return memcg1_charge_skmem(memcg, nr_pages, gfp_mask); 5094 5095 if (try_charge_memcg(memcg, gfp_mask, nr_pages) == 0) { 5096 mod_memcg_state(memcg, MEMCG_SOCK, nr_pages); 5097 return true; 5098 } 5099 5100 return false; 5101 } 5102 5103 /** 5104 * mem_cgroup_sk_uncharge - uncharge socket memory 5105 * @sk: socket in memcg to uncharge 5106 * @nr_pages: number of pages to uncharge 5107 */ 5108 void mem_cgroup_sk_uncharge(const struct sock *sk, unsigned int nr_pages) 5109 { 5110 struct mem_cgroup *memcg = mem_cgroup_from_sk(sk); 5111 5112 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) { 5113 memcg1_uncharge_skmem(memcg, nr_pages); 5114 return; 5115 } 5116 5117 mod_memcg_state(memcg, MEMCG_SOCK, -nr_pages); 5118 5119 refill_stock(memcg, nr_pages); 5120 } 5121 5122 static int __init cgroup_memory(char *s) 5123 { 5124 char *token; 5125 5126 while ((token = strsep(&s, ",")) != NULL) { 5127 if (!*token) 5128 continue; 5129 if (!strcmp(token, "nosocket")) 5130 cgroup_memory_nosocket = true; 5131 if (!strcmp(token, "nokmem")) 5132 cgroup_memory_nokmem = true; 5133 if (!strcmp(token, "nobpf")) 5134 cgroup_memory_nobpf = true; 5135 } 5136 return 1; 5137 } 5138 __setup("cgroup.memory=", cgroup_memory); 5139 5140 /* 5141 * Memory controller init before cgroup_init() initialize root_mem_cgroup. 5142 * 5143 * Some parts like memcg_hotplug_cpu_dead() have to be initialized from this 5144 * context because of lock dependencies (cgroup_lock -> cpu hotplug) but 5145 * basically everything that doesn't depend on a specific mem_cgroup structure 5146 * should be initialized from here. 5147 */ 5148 int __init mem_cgroup_init(void) 5149 { 5150 unsigned int memcg_size; 5151 int cpu; 5152 5153 /* 5154 * Currently s32 type (can refer to struct batched_lruvec_stat) is 5155 * used for per-memcg-per-cpu caching of per-node statistics. In order 5156 * to work fine, we should make sure that the overfill threshold can't 5157 * exceed S32_MAX / PAGE_SIZE. 5158 */ 5159 BUILD_BUG_ON(MEMCG_CHARGE_BATCH > S32_MAX / PAGE_SIZE); 5160 5161 cpuhp_setup_state_nocalls(CPUHP_MM_MEMCQ_DEAD, "mm/memctrl:dead", NULL, 5162 memcg_hotplug_cpu_dead); 5163 5164 for_each_possible_cpu(cpu) { 5165 INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work, 5166 drain_local_memcg_stock); 5167 INIT_WORK(&per_cpu_ptr(&obj_stock, cpu)->work, 5168 drain_local_obj_stock); 5169 } 5170 5171 memcg_size = struct_size_t(struct mem_cgroup, nodeinfo, nr_node_ids); 5172 memcg_cachep = kmem_cache_create("mem_cgroup", memcg_size, 0, 5173 SLAB_PANIC | SLAB_HWCACHE_ALIGN, NULL); 5174 5175 memcg_pn_cachep = KMEM_CACHE(mem_cgroup_per_node, 5176 SLAB_PANIC | SLAB_HWCACHE_ALIGN); 5177 5178 return 0; 5179 } 5180 5181 #ifdef CONFIG_SWAP 5182 /** 5183 * __mem_cgroup_try_charge_swap - try charging swap space for a folio 5184 * @folio: folio being added to swap 5185 * @entry: swap entry to charge 5186 * 5187 * Try to charge @folio's memcg for the swap space at @entry. 5188 * 5189 * Returns 0 on success, -ENOMEM on failure. 
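 *
 * Callers normally go through the mem_cgroup_try_charge_swap() wrapper; a
 * rough sketch of the usual pattern when a folio is added to swap
 * (hypothetical, error handling approximate):
 *
 *	if (mem_cgroup_try_charge_swap(folio, entry))
 *		goto fail_free_swap_entry;
 *	... proceed to add the folio to the swap cache ...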
5190 */ 5191 int __mem_cgroup_try_charge_swap(struct folio *folio, swp_entry_t entry) 5192 { 5193 unsigned int nr_pages = folio_nr_pages(folio); 5194 struct page_counter *counter; 5195 struct mem_cgroup *memcg; 5196 5197 if (do_memsw_account()) 5198 return 0; 5199 5200 memcg = folio_memcg(folio); 5201 5202 VM_WARN_ON_ONCE_FOLIO(!memcg, folio); 5203 if (!memcg) 5204 return 0; 5205 5206 if (!entry.val) { 5207 memcg_memory_event(memcg, MEMCG_SWAP_FAIL); 5208 return 0; 5209 } 5210 5211 memcg = mem_cgroup_id_get_online(memcg); 5212 5213 if (!mem_cgroup_is_root(memcg) && 5214 !page_counter_try_charge(&memcg->swap, nr_pages, &counter)) { 5215 memcg_memory_event(memcg, MEMCG_SWAP_MAX); 5216 memcg_memory_event(memcg, MEMCG_SWAP_FAIL); 5217 mem_cgroup_id_put(memcg); 5218 return -ENOMEM; 5219 } 5220 5221 /* Get references for the tail pages, too */ 5222 if (nr_pages > 1) 5223 mem_cgroup_id_get_many(memcg, nr_pages - 1); 5224 mod_memcg_state(memcg, MEMCG_SWAP, nr_pages); 5225 5226 swap_cgroup_record(folio, mem_cgroup_id(memcg), entry); 5227 5228 return 0; 5229 } 5230 5231 /** 5232 * __mem_cgroup_uncharge_swap - uncharge swap space 5233 * @entry: swap entry to uncharge 5234 * @nr_pages: the amount of swap space to uncharge 5235 */ 5236 void __mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages) 5237 { 5238 struct mem_cgroup *memcg; 5239 unsigned short id; 5240 5241 id = swap_cgroup_clear(entry, nr_pages); 5242 rcu_read_lock(); 5243 memcg = mem_cgroup_from_id(id); 5244 if (memcg) { 5245 if (!mem_cgroup_is_root(memcg)) { 5246 if (do_memsw_account()) 5247 page_counter_uncharge(&memcg->memsw, nr_pages); 5248 else 5249 page_counter_uncharge(&memcg->swap, nr_pages); 5250 } 5251 mod_memcg_state(memcg, MEMCG_SWAP, -nr_pages); 5252 mem_cgroup_id_put_many(memcg, nr_pages); 5253 } 5254 rcu_read_unlock(); 5255 } 5256 5257 long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg) 5258 { 5259 long nr_swap_pages = get_nr_swap_pages(); 5260 5261 if (mem_cgroup_disabled() || do_memsw_account()) 5262 return nr_swap_pages; 5263 for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg)) 5264 nr_swap_pages = min_t(long, nr_swap_pages, 5265 READ_ONCE(memcg->swap.max) - 5266 page_counter_read(&memcg->swap)); 5267 return nr_swap_pages; 5268 } 5269 5270 bool mem_cgroup_swap_full(struct folio *folio) 5271 { 5272 struct mem_cgroup *memcg; 5273 5274 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); 5275 5276 if (vm_swap_full()) 5277 return true; 5278 if (do_memsw_account()) 5279 return false; 5280 5281 memcg = folio_memcg(folio); 5282 if (!memcg) 5283 return false; 5284 5285 for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg)) { 5286 unsigned long usage = page_counter_read(&memcg->swap); 5287 5288 if (usage * 2 >= READ_ONCE(memcg->swap.high) || 5289 usage * 2 >= READ_ONCE(memcg->swap.max)) 5290 return true; 5291 } 5292 5293 return false; 5294 } 5295 5296 static int __init setup_swap_account(char *s) 5297 { 5298 bool res; 5299 5300 if (!kstrtobool(s, &res) && !res) 5301 pr_warn_once("The swapaccount=0 commandline option is deprecated " 5302 "in favor of configuring swap control via cgroupfs. 
" 5303 "Please report your usecase to linux-mm@kvack.org if you " 5304 "depend on this functionality.\n"); 5305 return 1; 5306 } 5307 __setup("swapaccount=", setup_swap_account); 5308 5309 static u64 swap_current_read(struct cgroup_subsys_state *css, 5310 struct cftype *cft) 5311 { 5312 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 5313 5314 return (u64)page_counter_read(&memcg->swap) * PAGE_SIZE; 5315 } 5316 5317 static int swap_peak_show(struct seq_file *sf, void *v) 5318 { 5319 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(sf)); 5320 5321 return peak_show(sf, v, &memcg->swap); 5322 } 5323 5324 static ssize_t swap_peak_write(struct kernfs_open_file *of, char *buf, 5325 size_t nbytes, loff_t off) 5326 { 5327 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 5328 5329 return peak_write(of, buf, nbytes, off, &memcg->swap, 5330 &memcg->swap_peaks); 5331 } 5332 5333 static int swap_high_show(struct seq_file *m, void *v) 5334 { 5335 return seq_puts_memcg_tunable(m, 5336 READ_ONCE(mem_cgroup_from_seq(m)->swap.high)); 5337 } 5338 5339 static ssize_t swap_high_write(struct kernfs_open_file *of, 5340 char *buf, size_t nbytes, loff_t off) 5341 { 5342 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 5343 unsigned long high; 5344 int err; 5345 5346 buf = strstrip(buf); 5347 err = page_counter_memparse(buf, "max", &high); 5348 if (err) 5349 return err; 5350 5351 page_counter_set_high(&memcg->swap, high); 5352 5353 return nbytes; 5354 } 5355 5356 static int swap_max_show(struct seq_file *m, void *v) 5357 { 5358 return seq_puts_memcg_tunable(m, 5359 READ_ONCE(mem_cgroup_from_seq(m)->swap.max)); 5360 } 5361 5362 static ssize_t swap_max_write(struct kernfs_open_file *of, 5363 char *buf, size_t nbytes, loff_t off) 5364 { 5365 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 5366 unsigned long max; 5367 int err; 5368 5369 buf = strstrip(buf); 5370 err = page_counter_memparse(buf, "max", &max); 5371 if (err) 5372 return err; 5373 5374 xchg(&memcg->swap.max, max); 5375 5376 return nbytes; 5377 } 5378 5379 static int swap_events_show(struct seq_file *m, void *v) 5380 { 5381 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 5382 5383 seq_printf(m, "high %lu\n", 5384 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_HIGH])); 5385 seq_printf(m, "max %lu\n", 5386 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_MAX])); 5387 seq_printf(m, "fail %lu\n", 5388 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_FAIL])); 5389 5390 return 0; 5391 } 5392 5393 static struct cftype swap_files[] = { 5394 { 5395 .name = "swap.current", 5396 .flags = CFTYPE_NOT_ON_ROOT, 5397 .read_u64 = swap_current_read, 5398 }, 5399 { 5400 .name = "swap.high", 5401 .flags = CFTYPE_NOT_ON_ROOT, 5402 .seq_show = swap_high_show, 5403 .write = swap_high_write, 5404 }, 5405 { 5406 .name = "swap.max", 5407 .flags = CFTYPE_NOT_ON_ROOT, 5408 .seq_show = swap_max_show, 5409 .write = swap_max_write, 5410 }, 5411 { 5412 .name = "swap.peak", 5413 .flags = CFTYPE_NOT_ON_ROOT, 5414 .open = peak_open, 5415 .release = peak_release, 5416 .seq_show = swap_peak_show, 5417 .write = swap_peak_write, 5418 }, 5419 { 5420 .name = "swap.events", 5421 .flags = CFTYPE_NOT_ON_ROOT, 5422 .file_offset = offsetof(struct mem_cgroup, swap_events_file), 5423 .seq_show = swap_events_show, 5424 }, 5425 { } /* terminate */ 5426 }; 5427 5428 #ifdef CONFIG_ZSWAP 5429 /** 5430 * obj_cgroup_may_zswap - check if this cgroup can zswap 5431 * @objcg: the object cgroup 5432 * 5433 * Check if the hierarchical zswap limit has been reached. 
5434 * 5435 * This doesn't check for specific headroom, and it is not atomic 5436 * either. But with zswap, the size of the allocation is only known 5437 * once compression has occurred, and this optimistic pre-check avoids 5438 * spending cycles on compression when there is already no room left 5439 * or zswap is disabled altogether somewhere in the hierarchy. 5440 */ 5441 bool obj_cgroup_may_zswap(struct obj_cgroup *objcg) 5442 { 5443 struct mem_cgroup *memcg, *original_memcg; 5444 bool ret = true; 5445 5446 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) 5447 return true; 5448 5449 original_memcg = get_mem_cgroup_from_objcg(objcg); 5450 for (memcg = original_memcg; !mem_cgroup_is_root(memcg); 5451 memcg = parent_mem_cgroup(memcg)) { 5452 unsigned long max = READ_ONCE(memcg->zswap_max); 5453 unsigned long pages; 5454 5455 if (max == PAGE_COUNTER_MAX) 5456 continue; 5457 if (max == 0) { 5458 ret = false; 5459 break; 5460 } 5461 5462 /* Force flush to get accurate stats for charging */ 5463 __mem_cgroup_flush_stats(memcg, true); 5464 pages = memcg_page_state(memcg, MEMCG_ZSWAP_B) / PAGE_SIZE; 5465 if (pages < max) 5466 continue; 5467 ret = false; 5468 break; 5469 } 5470 mem_cgroup_put(original_memcg); 5471 return ret; 5472 } 5473 5474 /** 5475 * obj_cgroup_charge_zswap - charge compression backend memory 5476 * @objcg: the object cgroup 5477 * @size: size of compressed object 5478 * 5479 * This forces the charge after obj_cgroup_may_zswap() allowed 5480 * compression and storage in zswap for this cgroup to go ahead. 5481 */ 5482 void obj_cgroup_charge_zswap(struct obj_cgroup *objcg, size_t size) 5483 { 5484 struct mem_cgroup *memcg; 5485 5486 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) 5487 return; 5488 5489 VM_WARN_ON_ONCE(!(current->flags & PF_MEMALLOC)); 5490 5491 /* PF_MEMALLOC context, charging must succeed */ 5492 if (obj_cgroup_charge(objcg, GFP_KERNEL, size)) 5493 VM_WARN_ON_ONCE(1); 5494 5495 rcu_read_lock(); 5496 memcg = obj_cgroup_memcg(objcg); 5497 mod_memcg_state(memcg, MEMCG_ZSWAP_B, size); 5498 mod_memcg_state(memcg, MEMCG_ZSWAPPED, 1); 5499 rcu_read_unlock(); 5500 } 5501 5502 /** 5503 * obj_cgroup_uncharge_zswap - uncharge compression backend memory 5504 * @objcg: the object cgroup 5505 * @size: size of compressed object 5506 * 5507 * Uncharges zswap memory on page in. 
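 *
 * A rough sketch of how the zswap store and load paths are expected to pair
 * these calls (hypothetical, heavily simplified):
 *
 *	store:
 *		if (!obj_cgroup_may_zswap(objcg))
 *			goto reject;
 *		... compress the page ...
 *		obj_cgroup_charge_zswap(objcg, compressed_len);
 *
 *	load/invalidate:
 *		obj_cgroup_uncharge_zswap(objcg, compressed_len);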
5508 */ 5509 void obj_cgroup_uncharge_zswap(struct obj_cgroup *objcg, size_t size) 5510 { 5511 struct mem_cgroup *memcg; 5512 5513 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) 5514 return; 5515 5516 obj_cgroup_uncharge(objcg, size); 5517 5518 rcu_read_lock(); 5519 memcg = obj_cgroup_memcg(objcg); 5520 mod_memcg_state(memcg, MEMCG_ZSWAP_B, -size); 5521 mod_memcg_state(memcg, MEMCG_ZSWAPPED, -1); 5522 rcu_read_unlock(); 5523 } 5524 5525 bool mem_cgroup_zswap_writeback_enabled(struct mem_cgroup *memcg) 5526 { 5527 /* if zswap is disabled, do not block pages going to the swapping device */ 5528 if (!zswap_is_enabled()) 5529 return true; 5530 5531 for (; memcg; memcg = parent_mem_cgroup(memcg)) 5532 if (!READ_ONCE(memcg->zswap_writeback)) 5533 return false; 5534 5535 return true; 5536 } 5537 5538 static u64 zswap_current_read(struct cgroup_subsys_state *css, 5539 struct cftype *cft) 5540 { 5541 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 5542 5543 mem_cgroup_flush_stats(memcg); 5544 return memcg_page_state(memcg, MEMCG_ZSWAP_B); 5545 } 5546 5547 static int zswap_max_show(struct seq_file *m, void *v) 5548 { 5549 return seq_puts_memcg_tunable(m, 5550 READ_ONCE(mem_cgroup_from_seq(m)->zswap_max)); 5551 } 5552 5553 static ssize_t zswap_max_write(struct kernfs_open_file *of, 5554 char *buf, size_t nbytes, loff_t off) 5555 { 5556 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 5557 unsigned long max; 5558 int err; 5559 5560 buf = strstrip(buf); 5561 err = page_counter_memparse(buf, "max", &max); 5562 if (err) 5563 return err; 5564 5565 xchg(&memcg->zswap_max, max); 5566 5567 return nbytes; 5568 } 5569 5570 static int zswap_writeback_show(struct seq_file *m, void *v) 5571 { 5572 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 5573 5574 seq_printf(m, "%d\n", READ_ONCE(memcg->zswap_writeback)); 5575 return 0; 5576 } 5577 5578 static ssize_t zswap_writeback_write(struct kernfs_open_file *of, 5579 char *buf, size_t nbytes, loff_t off) 5580 { 5581 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 5582 int zswap_writeback; 5583 ssize_t parse_ret = kstrtoint(strstrip(buf), 0, &zswap_writeback); 5584 5585 if (parse_ret) 5586 return parse_ret; 5587 5588 if (zswap_writeback != 0 && zswap_writeback != 1) 5589 return -EINVAL; 5590 5591 WRITE_ONCE(memcg->zswap_writeback, zswap_writeback); 5592 return nbytes; 5593 } 5594 5595 static struct cftype zswap_files[] = { 5596 { 5597 .name = "zswap.current", 5598 .flags = CFTYPE_NOT_ON_ROOT, 5599 .read_u64 = zswap_current_read, 5600 }, 5601 { 5602 .name = "zswap.max", 5603 .flags = CFTYPE_NOT_ON_ROOT, 5604 .seq_show = zswap_max_show, 5605 .write = zswap_max_write, 5606 }, 5607 { 5608 .name = "zswap.writeback", 5609 .seq_show = zswap_writeback_show, 5610 .write = zswap_writeback_write, 5611 }, 5612 { } /* terminate */ 5613 }; 5614 #endif /* CONFIG_ZSWAP */ 5615 5616 static int __init mem_cgroup_swap_init(void) 5617 { 5618 if (mem_cgroup_disabled()) 5619 return 0; 5620 5621 WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys, swap_files)); 5622 #ifdef CONFIG_MEMCG_V1 5623 WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys, memsw_files)); 5624 #endif 5625 #ifdef CONFIG_ZSWAP 5626 WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys, zswap_files)); 5627 #endif 5628 return 0; 5629 } 5630 subsys_initcall(mem_cgroup_swap_init); 5631 5632 #endif /* CONFIG_SWAP */ 5633 5634 bool mem_cgroup_node_allowed(struct mem_cgroup *memcg, int nid) 5635 { 5636 return memcg ? 
cpuset_node_allowed(memcg->css.cgroup, nid) : true; 5637 } 5638 5639 void mem_cgroup_show_protected_memory(struct mem_cgroup *memcg) 5640 { 5641 if (mem_cgroup_disabled() || !cgroup_subsys_on_dfl(memory_cgrp_subsys)) 5642 return; 5643 5644 if (!memcg) 5645 memcg = root_mem_cgroup; 5646 5647 pr_warn("Memory cgroup min protection %lukB -- low protection %lukB\n", 5648 K(atomic_long_read(&memcg->memory.children_min_usage) * PAGE_SIZE), 5649 K(atomic_long_read(&memcg->memory.children_low_usage) * PAGE_SIZE)); 5650 } 5651