// SPDX-License-Identifier: GPL-2.0-or-later
/* memcontrol.c - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * Memory thresholds
 * Copyright (C) 2009 Nokia Corporation
 * Author: Kirill A. Shutemov
 *
 * Kernel Memory Controller
 * Copyright (C) 2012 Parallels Inc. and Google Inc.
 * Authors: Glauber Costa and Suleiman Souhlal
 *
 * Native page reclaim
 * Charge lifetime sanitation
 * Lockless page tracking & accounting
 * Unified hierarchy configuration model
 * Copyright (C) 2015 Red Hat, Inc., Johannes Weiner
 *
 * Per memcg lru locking
 * Copyright (C) 2020 Alibaba, Inc, Alex Shi
 */

#include <linux/cgroup-defs.h>
#include <linux/page_counter.h>
#include <linux/memcontrol.h>
#include <linux/cgroup.h>
#include <linux/sched/mm.h>
#include <linux/shmem_fs.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/vm_event_item.h>
#include <linux/smp.h>
#include <linux/page-flags.h>
#include <linux/backing-dev.h>
#include <linux/bit_spinlock.h>
#include <linux/rcupdate.h>
#include <linux/limits.h>
#include <linux/export.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/swapops.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/parser.h>
#include <linux/vmpressure.h>
#include <linux/memremap.h>
#include <linux/mm_inline.h>
#include <linux/swap_cgroup.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/lockdep.h>
#include <linux/resume_user_mode.h>
#include <linux/psi.h>
#include <linux/seq_buf.h>
#include <linux/sched/isolation.h>
#include <linux/kmemleak.h>
#include "internal.h"
#include <net/sock.h>
#include <net/ip.h>
#include "slab.h"
#include "memcontrol-v1.h"

#include <linux/uaccess.h>

#define CREATE_TRACE_POINTS
#include <trace/events/memcg.h>
#undef CREATE_TRACE_POINTS

#include <trace/events/vmscan.h>

struct cgroup_subsys memory_cgrp_subsys __read_mostly;
EXPORT_SYMBOL(memory_cgrp_subsys);

struct mem_cgroup *root_mem_cgroup __read_mostly;

/* Active memory cgroup to use from an interrupt context */
DEFINE_PER_CPU(struct mem_cgroup *, int_active_memcg);
EXPORT_PER_CPU_SYMBOL_GPL(int_active_memcg);
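/*
 * Illustrative sketch, not code from this file: charges made while
 * !in_task() are attributed via the int_active_memcg slot above, which
 * callers are expected to populate through set_active_memcg() from
 * linux/sched/mm.h:
 *
 *	old = set_active_memcg(memcg);
 *	ptr = kmalloc(size, GFP_ATOMIC | __GFP_ACCOUNT);  // charged to @memcg
 *	set_active_memcg(old);
 */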
/* Socket memory accounting disabled? */
static bool cgroup_memory_nosocket __ro_after_init;

/* Kernel memory accounting disabled? */
static bool cgroup_memory_nokmem __ro_after_init;

/* BPF memory accounting disabled? */
static bool cgroup_memory_nobpf __ro_after_init;

#ifdef CONFIG_CGROUP_WRITEBACK
static DECLARE_WAIT_QUEUE_HEAD(memcg_cgwb_frn_waitq);
#endif

static inline bool task_is_dying(void)
{
	return tsk_is_oom_victim(current) || fatal_signal_pending(current) ||
		(current->flags & PF_EXITING);
}

/* Some nice accessors for the vmpressure. */
struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
{
	if (!memcg)
		memcg = root_mem_cgroup;
	return &memcg->vmpressure;
}

struct mem_cgroup *vmpressure_to_memcg(struct vmpressure *vmpr)
{
	return container_of(vmpr, struct mem_cgroup, vmpressure);
}

#define SEQ_BUF_SIZE SZ_4K
#define CURRENT_OBJCG_UPDATE_BIT 0
#define CURRENT_OBJCG_UPDATE_FLAG (1UL << CURRENT_OBJCG_UPDATE_BIT)

static DEFINE_SPINLOCK(objcg_lock);

bool mem_cgroup_kmem_disabled(void)
{
	return cgroup_memory_nokmem;
}

static void obj_cgroup_uncharge_pages(struct obj_cgroup *objcg,
				      unsigned int nr_pages);

static void obj_cgroup_release(struct percpu_ref *ref)
{
	struct obj_cgroup *objcg = container_of(ref, struct obj_cgroup, refcnt);
	unsigned int nr_bytes;
	unsigned int nr_pages;
	unsigned long flags;

	/*
	 * At this point all allocated objects are freed, and
	 * objcg->nr_charged_bytes can't have an arbitrary byte value.
	 * However, it can be PAGE_SIZE or (x * PAGE_SIZE).
	 *
	 * The following sequence can lead to it:
	 * 1) CPU0: objcg == stock->cached_objcg
	 * 2) CPU1: we do a small allocation (e.g. 92 bytes),
	 *          PAGE_SIZE bytes are charged
	 * 3) CPU1: a process from another memcg is allocating something,
	 *          the stock is flushed,
	 *          objcg->nr_charged_bytes = PAGE_SIZE - 92
	 * 4) CPU0: we release this object,
	 *          92 bytes are added to stock->nr_bytes
	 * 5) CPU0: stock is flushed,
	 *          92 bytes are added to objcg->nr_charged_bytes
	 *
	 * As a result, nr_charged_bytes == PAGE_SIZE.
	 * This page will be uncharged in obj_cgroup_release().
	 */
	nr_bytes = atomic_read(&objcg->nr_charged_bytes);
	WARN_ON_ONCE(nr_bytes & (PAGE_SIZE - 1));
	nr_pages = nr_bytes >> PAGE_SHIFT;

	if (nr_pages)
		obj_cgroup_uncharge_pages(objcg, nr_pages);

	spin_lock_irqsave(&objcg_lock, flags);
	list_del(&objcg->list);
	spin_unlock_irqrestore(&objcg_lock, flags);

	percpu_ref_exit(ref);
	kfree_rcu(objcg, rcu);
}

static struct obj_cgroup *obj_cgroup_alloc(void)
{
	struct obj_cgroup *objcg;
	int ret;

	objcg = kzalloc(sizeof(struct obj_cgroup), GFP_KERNEL);
	if (!objcg)
		return NULL;

	ret = percpu_ref_init(&objcg->refcnt, obj_cgroup_release, 0,
			      GFP_KERNEL);
	if (ret) {
		kfree(objcg);
		return NULL;
	}
	INIT_LIST_HEAD(&objcg->list);
	return objcg;
}

static void memcg_reparent_objcgs(struct mem_cgroup *memcg,
				  struct mem_cgroup *parent)
{
	struct obj_cgroup *objcg, *iter;

	objcg = rcu_replace_pointer(memcg->objcg, NULL, true);

	spin_lock_irq(&objcg_lock);

	/* 1) Ready to reparent active objcg. */
	list_add(&objcg->list, &memcg->objcg_list);
	/* 2) Reparent active objcg and already reparented objcgs to parent. */
	list_for_each_entry(iter, &memcg->objcg_list, list)
		WRITE_ONCE(iter->memcg, parent);
	/* 3) Move already reparented objcgs to the parent's list */
	list_splice(&memcg->objcg_list, &parent->objcg_list);

	spin_unlock_irq(&objcg_lock);

	percpu_ref_kill(&objcg->refcnt);
}
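/*
 * Illustrative note on the ordering above: once
 * memcg_reparent_objcgs(memcg, parent) completes, every obj_cgroup that
 * pointed at @memcg points at @parent instead, so an RCU reader doing an
 * objcg-to-memcg lookup observes either the old or the new owner, never
 * a memcg that has already been freed.
 */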
/*
 * A lot of the calls to the cache allocation functions are expected to be
 * inlined by the compiler. Since the calls to memcg_slab_post_alloc_hook() are
 * conditional to this static branch, we'll have to allow modules that do
 * kmem_cache_alloc and the like to see this symbol as well.
 */
DEFINE_STATIC_KEY_FALSE(memcg_kmem_online_key);
EXPORT_SYMBOL(memcg_kmem_online_key);

DEFINE_STATIC_KEY_FALSE(memcg_bpf_enabled_key);
EXPORT_SYMBOL(memcg_bpf_enabled_key);

/**
 * mem_cgroup_css_from_folio - css of the memcg associated with a folio
 * @folio: folio of interest
 *
 * If memcg is bound to the default hierarchy, css of the memcg associated
 * with @folio is returned. The returned css remains associated with @folio
 * until it is released.
 *
 * If memcg is bound to a traditional hierarchy, the css of root_mem_cgroup
 * is returned.
 */
struct cgroup_subsys_state *mem_cgroup_css_from_folio(struct folio *folio)
{
	struct mem_cgroup *memcg = folio_memcg(folio);

	if (!memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
		memcg = root_mem_cgroup;

	return &memcg->css;
}

/**
 * page_cgroup_ino - return inode number of the memcg a page is charged to
 * @page: the page
 *
 * Look up the closest online ancestor of the memory cgroup @page is charged to
 * and return its inode number or 0 if @page is not charged to any cgroup. It
 * is safe to call this function without holding a reference to @page.
 *
 * Note, this function is inherently racy, because there is nothing to prevent
 * the cgroup inode from getting torn down and potentially reallocated a moment
 * after page_cgroup_ino() returns, so it should only be used by callers that
 * do not care (such as procfs interfaces).
 */
ino_t page_cgroup_ino(struct page *page)
{
	struct mem_cgroup *memcg;
	unsigned long ino = 0;

	rcu_read_lock();
	/* page_folio() is racy here, but the entire function is racy anyway */
	memcg = folio_memcg_check(page_folio(page));

	while (memcg && !(memcg->css.flags & CSS_ONLINE))
		memcg = parent_mem_cgroup(memcg);
	if (memcg)
		ino = cgroup_ino(memcg->css.cgroup);
	rcu_read_unlock();
	return ino;
}
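/*
 * Illustrative consumer (a note about callers, not code in this file):
 * the /proc/kpagecgroup interface reports one such inode number per PFN,
 * which userspace can match against the inode of a memory cgroup
 * directory to attribute physical pages to cgroups.
 */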
/* Subset of node_stat_item for memcg stats */
static const unsigned int memcg_node_stat_items[] = {
	NR_INACTIVE_ANON,
	NR_ACTIVE_ANON,
	NR_INACTIVE_FILE,
	NR_ACTIVE_FILE,
	NR_UNEVICTABLE,
	NR_SLAB_RECLAIMABLE_B,
	NR_SLAB_UNRECLAIMABLE_B,
	WORKINGSET_REFAULT_ANON,
	WORKINGSET_REFAULT_FILE,
	WORKINGSET_ACTIVATE_ANON,
	WORKINGSET_ACTIVATE_FILE,
	WORKINGSET_RESTORE_ANON,
	WORKINGSET_RESTORE_FILE,
	WORKINGSET_NODERECLAIM,
	NR_ANON_MAPPED,
	NR_FILE_MAPPED,
	NR_FILE_PAGES,
	NR_FILE_DIRTY,
	NR_WRITEBACK,
	NR_SHMEM,
	NR_SHMEM_THPS,
	NR_FILE_THPS,
	NR_ANON_THPS,
	NR_KERNEL_STACK_KB,
	NR_PAGETABLE,
	NR_SECONDARY_PAGETABLE,
#ifdef CONFIG_SWAP
	NR_SWAPCACHE,
#endif
#ifdef CONFIG_NUMA_BALANCING
	PGPROMOTE_SUCCESS,
#endif
	PGDEMOTE_KSWAPD,
	PGDEMOTE_DIRECT,
	PGDEMOTE_KHUGEPAGED,
#ifdef CONFIG_HUGETLB_PAGE
	NR_HUGETLB,
#endif
};

static const unsigned int memcg_stat_items[] = {
	MEMCG_SWAP,
	MEMCG_SOCK,
	MEMCG_PERCPU_B,
	MEMCG_VMALLOC,
	MEMCG_KMEM,
	MEMCG_ZSWAP_B,
	MEMCG_ZSWAPPED,
};

#define NR_MEMCG_NODE_STAT_ITEMS ARRAY_SIZE(memcg_node_stat_items)
#define MEMCG_VMSTAT_SIZE (NR_MEMCG_NODE_STAT_ITEMS + \
			   ARRAY_SIZE(memcg_stat_items))
#define BAD_STAT_IDX(index) ((u32)(index) >= U8_MAX)
static u8 mem_cgroup_stats_index[MEMCG_NR_STAT] __read_mostly;

static void init_memcg_stats(void)
{
	u8 i, j = 0;

	BUILD_BUG_ON(MEMCG_NR_STAT >= U8_MAX);

	memset(mem_cgroup_stats_index, U8_MAX, sizeof(mem_cgroup_stats_index));

	for (i = 0; i < NR_MEMCG_NODE_STAT_ITEMS; ++i, ++j)
		mem_cgroup_stats_index[memcg_node_stat_items[i]] = j;

	for (i = 0; i < ARRAY_SIZE(memcg_stat_items); ++i, ++j)
		mem_cgroup_stats_index[memcg_stat_items[i]] = j;
}

static inline int memcg_stats_index(int idx)
{
	return mem_cgroup_stats_index[idx];
}
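/*
 * Worked example (illustrative): NR_ANON_MAPPED is the 15th entry of
 * memcg_node_stat_items[], so memcg_stats_index(NR_ANON_MAPPED) == 14
 * and its per-memcg value lives in the dense state[14] slot rather than
 * at the sparse node_stat_item value. Items absent from both arrays
 * keep the U8_MAX poison and are caught by BAD_STAT_IDX().
 */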
struct lruvec_stats_percpu {
	/* Local (CPU and cgroup) state */
	long state[NR_MEMCG_NODE_STAT_ITEMS];

	/* Delta calculation for lockless upward propagation */
	long state_prev[NR_MEMCG_NODE_STAT_ITEMS];
};

struct lruvec_stats {
	/* Aggregated (CPU and subtree) state */
	long state[NR_MEMCG_NODE_STAT_ITEMS];

	/* Non-hierarchical (CPU aggregated) state */
	long state_local[NR_MEMCG_NODE_STAT_ITEMS];

	/* Pending child counts during tree propagation */
	long state_pending[NR_MEMCG_NODE_STAT_ITEMS];
};

unsigned long lruvec_page_state(struct lruvec *lruvec, enum node_stat_item idx)
{
	struct mem_cgroup_per_node *pn;
	long x;
	int i;

	if (mem_cgroup_disabled())
		return node_page_state(lruvec_pgdat(lruvec), idx);

	i = memcg_stats_index(idx);
	if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx))
		return 0;

	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	x = READ_ONCE(pn->lruvec_stats->state[i]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

unsigned long lruvec_page_state_local(struct lruvec *lruvec,
				      enum node_stat_item idx)
{
	struct mem_cgroup_per_node *pn;
	long x;
	int i;

	if (mem_cgroup_disabled())
		return node_page_state(lruvec_pgdat(lruvec), idx);

	i = memcg_stats_index(idx);
	if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx))
		return 0;

	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	x = READ_ONCE(pn->lruvec_stats->state_local[i]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

/* Subset of vm_event_item to report for memcg event stats */
static const unsigned int memcg_vm_event_stat[] = {
#ifdef CONFIG_MEMCG_V1
	PGPGIN,
	PGPGOUT,
#endif
	PSWPIN,
	PSWPOUT,
	PGSCAN_KSWAPD,
	PGSCAN_DIRECT,
	PGSCAN_KHUGEPAGED,
	PGSTEAL_KSWAPD,
	PGSTEAL_DIRECT,
	PGSTEAL_KHUGEPAGED,
	PGFAULT,
	PGMAJFAULT,
	PGREFILL,
	PGACTIVATE,
	PGDEACTIVATE,
	PGLAZYFREE,
	PGLAZYFREED,
#ifdef CONFIG_SWAP
	SWPIN_ZERO,
	SWPOUT_ZERO,
#endif
#ifdef CONFIG_ZSWAP
	ZSWPIN,
	ZSWPOUT,
	ZSWPWB,
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	THP_FAULT_ALLOC,
	THP_COLLAPSE_ALLOC,
	THP_SWPOUT,
	THP_SWPOUT_FALLBACK,
#endif
#ifdef CONFIG_NUMA_BALANCING
	NUMA_PAGE_MIGRATE,
	NUMA_PTE_UPDATES,
	NUMA_HINT_FAULTS,
#endif
};

#define NR_MEMCG_EVENTS ARRAY_SIZE(memcg_vm_event_stat)
static u8 mem_cgroup_events_index[NR_VM_EVENT_ITEMS] __read_mostly;
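/*
 * Note on the CONFIG_SMP clamping above (illustrative): per-CPU deltas
 * are folded into the aggregates asynchronously, so a reader can see a
 * decrement before the matching increment has been propagated and
 * observe a transiently negative sum; such values are reported as 0.
 */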
static void init_memcg_events(void)
{
	u8 i;

	BUILD_BUG_ON(NR_VM_EVENT_ITEMS >= U8_MAX);

	memset(mem_cgroup_events_index, U8_MAX,
	       sizeof(mem_cgroup_events_index));

	for (i = 0; i < NR_MEMCG_EVENTS; ++i)
		mem_cgroup_events_index[memcg_vm_event_stat[i]] = i;
}

static inline int memcg_events_index(enum vm_event_item idx)
{
	return mem_cgroup_events_index[idx];
}

struct memcg_vmstats_percpu {
	/* Stats updates since the last flush */
	unsigned int			stats_updates;

	/* Cached pointers for fast iteration in memcg_rstat_updated() */
	struct memcg_vmstats_percpu	*parent;
	struct memcg_vmstats		*vmstats;

	/* The above should fit a single cacheline for memcg_rstat_updated() */

	/* Local (CPU and cgroup) page state & events */
	long			state[MEMCG_VMSTAT_SIZE];
	unsigned long		events[NR_MEMCG_EVENTS];

	/* Delta calculation for lockless upward propagation */
	long			state_prev[MEMCG_VMSTAT_SIZE];
	unsigned long		events_prev[NR_MEMCG_EVENTS];
} ____cacheline_aligned;

struct memcg_vmstats {
	/* Aggregated (CPU and subtree) page state & events */
	long			state[MEMCG_VMSTAT_SIZE];
	unsigned long		events[NR_MEMCG_EVENTS];

	/* Non-hierarchical (CPU aggregated) page state & events */
	long			state_local[MEMCG_VMSTAT_SIZE];
	unsigned long		events_local[NR_MEMCG_EVENTS];

	/* Pending child counts during tree propagation */
	long			state_pending[MEMCG_VMSTAT_SIZE];
	unsigned long		events_pending[NR_MEMCG_EVENTS];

	/* Stats updates since the last flush */
	atomic64_t		stats_updates;
};

/*
 * memcg and lruvec stats flushing
 *
 * Many codepaths leading to stats update or read are performance sensitive and
 * adding stats flushing in such codepaths is not desirable. So, to optimize the
 * flushing the kernel does:
 *
 * 1) Periodically and asynchronously flush the stats every 2 seconds so the
 *    rstat update tree does not grow unbounded.
 *
 * 2) Flush the stats synchronously on the reader side only when there are more
 *    than (MEMCG_CHARGE_BATCH * nr_cpus) update events. This can leave the
 *    stats out of sync by at most (MEMCG_CHARGE_BATCH * nr_cpus), but only for
 *    2 seconds due to (1).
 */
static void flush_memcg_stats_dwork(struct work_struct *w);
static DECLARE_DEFERRABLE_WORK(stats_flush_dwork, flush_memcg_stats_dwork);
static u64 flush_last_time;

#define FLUSH_TIME (2UL*HZ)
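/*
 * Illustrative numbers for (2) above, assuming MEMCG_CHARGE_BATCH == 64:
 * on a 64-CPU machine a reader only pays for a synchronous flush once
 * more than 64 * 64 == 4096 page-sized update events have accumulated
 * since the last flush; below that, reads are served from counters whose
 * staleness is bounded by the 2-second periodic flush.
 */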
/*
 * Accessors to ensure that preemption is disabled on PREEMPT_RT because it can
 * not rely on this as part of an acquired spinlock_t lock. These functions are
 * never used in hardirq context on PREEMPT_RT and therefore disabling
 * preemption is sufficient.
 */
static void memcg_stats_lock(void)
{
	preempt_disable_nested();
	VM_WARN_ON_IRQS_ENABLED();
}

static void __memcg_stats_lock(void)
{
	preempt_disable_nested();
}

static void memcg_stats_unlock(void)
{
	preempt_enable_nested();
}


static bool memcg_vmstats_needs_flush(struct memcg_vmstats *vmstats)
{
	return atomic64_read(&vmstats->stats_updates) >
		MEMCG_CHARGE_BATCH * num_online_cpus();
}

static inline void memcg_rstat_updated(struct mem_cgroup *memcg, int val)
{
	struct memcg_vmstats_percpu *statc;
	int cpu = smp_processor_id();
	unsigned int stats_updates;

	if (!val)
		return;

	cgroup_rstat_updated(memcg->css.cgroup, cpu);
	statc = this_cpu_ptr(memcg->vmstats_percpu);
	for (; statc; statc = statc->parent) {
		stats_updates = READ_ONCE(statc->stats_updates) + abs(val);
		WRITE_ONCE(statc->stats_updates, stats_updates);
		if (stats_updates < MEMCG_CHARGE_BATCH)
			continue;

		/*
		 * If @memcg is already flushable, increasing stats_updates is
		 * redundant. Avoid the overhead of the atomic update.
		 */
		if (!memcg_vmstats_needs_flush(statc->vmstats))
			atomic64_add(stats_updates,
				     &statc->vmstats->stats_updates);
		WRITE_ONCE(statc->stats_updates, 0);
	}
}

static void __mem_cgroup_flush_stats(struct mem_cgroup *memcg, bool force)
{
	bool needs_flush = memcg_vmstats_needs_flush(memcg->vmstats);

	trace_memcg_flush_stats(memcg, atomic64_read(&memcg->vmstats->stats_updates),
		force, needs_flush);

	if (!force && !needs_flush)
		return;

	if (mem_cgroup_is_root(memcg))
		WRITE_ONCE(flush_last_time, jiffies_64);

	cgroup_rstat_flush(memcg->css.cgroup);
}

/*
 * mem_cgroup_flush_stats - flush the stats of a memory cgroup subtree
 * @memcg: root of the subtree to flush
 *
 * Flushing is serialized by the underlying global rstat lock. There is also a
 * minimum amount of work to be done even if there are no stat updates to flush.
 * Hence, we only flush the stats if the updates delta exceeds a threshold. This
 * avoids unnecessary work and contention on the underlying lock.
 */
void mem_cgroup_flush_stats(struct mem_cgroup *memcg)
{
	if (mem_cgroup_disabled())
		return;

	if (!memcg)
		memcg = root_mem_cgroup;

	__mem_cgroup_flush_stats(memcg, false);
}

void mem_cgroup_flush_stats_ratelimited(struct mem_cgroup *memcg)
{
	/* Only flush if the periodic flusher is one full cycle late */
	if (time_after64(jiffies_64, READ_ONCE(flush_last_time) + 2*FLUSH_TIME))
		mem_cgroup_flush_stats(memcg);
}

static void flush_memcg_stats_dwork(struct work_struct *w)
{
	/*
	 * Deliberately ignore memcg_vmstats_needs_flush() here so that flushing
	 * in latency-sensitive paths is as cheap as possible.
	 */
	__mem_cgroup_flush_stats(root_mem_cgroup, true);
	queue_delayed_work(system_unbound_wq, &stats_flush_dwork, FLUSH_TIME);
}

unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
{
	long x;
	int i = memcg_stats_index(idx);

	if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx))
		return 0;

	x = READ_ONCE(memcg->vmstats->state[i]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static int memcg_page_state_unit(int item);

/*
 * Normalize the value passed into memcg_rstat_updated() to be in pages. Round
 * up non-zero sub-page updates to 1 page as zero page updates are ignored.
 */
static int memcg_state_val_in_pages(int idx, int val)
{
	int unit = memcg_page_state_unit(idx);

	if (!val || unit == PAGE_SIZE)
		return val;
	else
		return max(val * unit / PAGE_SIZE, 1UL);
}

/**
 * __mod_memcg_state - update cgroup memory statistics
 * @memcg: the memory cgroup
 * @idx: the stat item - can be enum memcg_stat_item or enum node_stat_item
 * @val: delta to add to the counter, can be negative
 */
void __mod_memcg_state(struct mem_cgroup *memcg, enum memcg_stat_item idx,
		       int val)
{
	int i = memcg_stats_index(idx);

	if (mem_cgroup_disabled())
		return;

	if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx))
		return;

	__this_cpu_add(memcg->vmstats_percpu->state[i], val);
	val = memcg_state_val_in_pages(idx, val);
	memcg_rstat_updated(memcg, val);
	trace_mod_memcg_state(memcg, idx, val);
}

/* idx can be of type enum memcg_stat_item or node_stat_item. */
unsigned long memcg_page_state_local(struct mem_cgroup *memcg, int idx)
{
	long x;
	int i = memcg_stats_index(idx);

	if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx))
		return 0;

	x = READ_ONCE(memcg->vmstats->state_local[i]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}
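/*
 * Worked example for memcg_state_val_in_pages() above (illustrative):
 * a +100 byte slab update has unit == 1, so 100 * 1 / 4096 == 0 on a
 * 4K-page system and the result is rounded up to 1; the pending-update
 * count in memcg_rstat_updated() therefore still advances for sub-page
 * updates instead of ignoring them.
 */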
static void __mod_memcg_lruvec_state(struct lruvec *lruvec,
				     enum node_stat_item idx,
				     int val)
{
	struct mem_cgroup_per_node *pn;
	struct mem_cgroup *memcg;
	int i = memcg_stats_index(idx);

	if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx))
		return;

	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	memcg = pn->memcg;

	/*
	 * Callers from rmap rely on disabled preemption because they never
	 * update their counter from in-interrupt context. For those
	 * counters we check that the update is never performed from an
	 * interrupt context, while other callers need to have interrupts
	 * disabled.
	 */
	__memcg_stats_lock();
	if (IS_ENABLED(CONFIG_DEBUG_VM)) {
		switch (idx) {
		case NR_ANON_MAPPED:
		case NR_FILE_MAPPED:
		case NR_ANON_THPS:
			WARN_ON_ONCE(!in_task());
			break;
		default:
			VM_WARN_ON_IRQS_ENABLED();
		}
	}

	/* Update memcg */
	__this_cpu_add(memcg->vmstats_percpu->state[i], val);

	/* Update lruvec */
	__this_cpu_add(pn->lruvec_stats_percpu->state[i], val);

	val = memcg_state_val_in_pages(idx, val);
	memcg_rstat_updated(memcg, val);
	trace_mod_memcg_lruvec_state(memcg, idx, val);
	memcg_stats_unlock();
}

/**
 * __mod_lruvec_state - update lruvec memory statistics
 * @lruvec: the lruvec
 * @idx: the stat item
 * @val: delta to add to the counter, can be negative
 *
 * The lruvec is the intersection of the NUMA node and a cgroup. This
 * function updates all three counters that are affected by a
 * change of state at this level: per-node, per-cgroup, per-lruvec.
 */
void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
			int val)
{
	/* Update node */
	__mod_node_page_state(lruvec_pgdat(lruvec), idx, val);

	/* Update memcg and lruvec */
	if (!mem_cgroup_disabled())
		__mod_memcg_lruvec_state(lruvec, idx, val);
}

void __lruvec_stat_mod_folio(struct folio *folio, enum node_stat_item idx,
			     int val)
{
	struct mem_cgroup *memcg;
	pg_data_t *pgdat = folio_pgdat(folio);
	struct lruvec *lruvec;

	rcu_read_lock();
	memcg = folio_memcg(folio);
	/* Untracked pages have no memcg, no lruvec. Update only the node */
	if (!memcg) {
		rcu_read_unlock();
		__mod_node_page_state(pgdat, idx, val);
		return;
	}

	lruvec = mem_cgroup_lruvec(memcg, pgdat);
	__mod_lruvec_state(lruvec, idx, val);
	rcu_read_unlock();
}
EXPORT_SYMBOL(__lruvec_stat_mod_folio);
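/*
 * Illustrative example of the three-level update above: when the page
 * cache inserts a charged folio, a single
 * __lruvec_stat_mod_folio(folio, NR_FILE_PAGES, folio_nr_pages(folio))
 * bumps the node-wide counter, the owning memcg's counter and the
 * per-lruvec counter together.
 */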
void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val)
{
	pg_data_t *pgdat = page_pgdat(virt_to_page(p));
	struct mem_cgroup *memcg;
	struct lruvec *lruvec;

	rcu_read_lock();
	memcg = mem_cgroup_from_slab_obj(p);

	/*
	 * Untracked pages have no memcg, no lruvec. Update only the
	 * node. If we reparent the slab objects to the root memcg,
	 * when we free the slab object, we need to update the per-memcg
	 * vmstats to keep it correct for the root memcg.
	 */
	if (!memcg) {
		__mod_node_page_state(pgdat, idx, val);
	} else {
		lruvec = mem_cgroup_lruvec(memcg, pgdat);
		__mod_lruvec_state(lruvec, idx, val);
	}
	rcu_read_unlock();
}

/**
 * __count_memcg_events - account VM events in a cgroup
 * @memcg: the memory cgroup
 * @idx: the event item
 * @count: the number of events that occurred
 */
void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
			  unsigned long count)
{
	int i = memcg_events_index(idx);

	if (mem_cgroup_disabled())
		return;

	if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx))
		return;

	memcg_stats_lock();
	__this_cpu_add(memcg->vmstats_percpu->events[i], count);
	memcg_rstat_updated(memcg, count);
	trace_count_memcg_events(memcg, idx, count);
	memcg_stats_unlock();
}

unsigned long memcg_events(struct mem_cgroup *memcg, int event)
{
	int i = memcg_events_index(event);

	if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, event))
		return 0;

	return READ_ONCE(memcg->vmstats->events[i]);
}

unsigned long memcg_events_local(struct mem_cgroup *memcg, int event)
{
	int i = memcg_events_index(event);

	if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, event))
		return 0;

	return READ_ONCE(memcg->vmstats->events_local[i]);
}

struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
{
	/*
	 * mm_update_next_owner() may clear mm->owner to NULL
	 * if it races with swapoff, page migration, etc.
	 * So this can be called with p == NULL.
	 */
	if (unlikely(!p))
		return NULL;

	return mem_cgroup_from_css(task_css(p, memory_cgrp_id));
}
EXPORT_SYMBOL(mem_cgroup_from_task);

static __always_inline struct mem_cgroup *active_memcg(void)
{
	if (!in_task())
		return this_cpu_read(int_active_memcg);
	else
		return current->active_memcg;
}
/**
 * get_mem_cgroup_from_mm: Obtain a reference on a given mm_struct's memcg.
 * @mm: mm from which memcg should be extracted. It can be NULL.
 *
 * Obtain a reference on mm->memcg and return it if successful. If mm
 * is NULL, then the memcg is chosen as follows:
 * 1) The active memcg, if set.
 * 2) current->mm->memcg, if available
 * 3) root memcg
 * If mem_cgroup is disabled, NULL is returned.
 */
struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return NULL;

	/*
	 * Page cache insertions can happen without an
	 * actual mm context, e.g. during disk probing
	 * on boot, loopback IO, acct() writes etc.
	 *
	 * No need to css_get on root memcg as the reference
	 * counting is disabled on the root level in the
	 * cgroup core. See CSS_NO_REF.
	 */
	if (unlikely(!mm)) {
		memcg = active_memcg();
		if (unlikely(memcg)) {
			/* remote memcg must hold a ref */
			css_get(&memcg->css);
			return memcg;
		}
		mm = current->mm;
		if (unlikely(!mm))
			return root_mem_cgroup;
	}

	rcu_read_lock();
	do {
		memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
		if (unlikely(!memcg))
			memcg = root_mem_cgroup;
	} while (!css_tryget(&memcg->css));
	rcu_read_unlock();
	return memcg;
}
EXPORT_SYMBOL(get_mem_cgroup_from_mm);

/**
 * get_mem_cgroup_from_current - Obtain a reference on current task's memcg.
 */
struct mem_cgroup *get_mem_cgroup_from_current(void)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return NULL;

again:
	rcu_read_lock();
	memcg = mem_cgroup_from_task(current);
	if (!css_tryget(&memcg->css)) {
		rcu_read_unlock();
		goto again;
	}
	rcu_read_unlock();
	return memcg;
}

/**
 * get_mem_cgroup_from_folio - Obtain a reference on a given folio's memcg.
 * @folio: folio from which memcg should be extracted.
 */
struct mem_cgroup *get_mem_cgroup_from_folio(struct folio *folio)
{
	struct mem_cgroup *memcg = folio_memcg(folio);

	if (mem_cgroup_disabled())
		return NULL;

	rcu_read_lock();
	if (!memcg || WARN_ON_ONCE(!css_tryget(&memcg->css)))
		memcg = root_mem_cgroup;
	rcu_read_unlock();
	return memcg;
}
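/*
 * Typical usage sketch for the getters above (illustrative):
 *
 *	memcg = get_mem_cgroup_from_mm(current->mm);
 *	if (memcg) {
 *		... charge to or inspect memcg ...
 *		css_put(&memcg->css);
 *	}
 *
 * The css_tryget() loops retry because the task can migrate between the
 * rcu_dereference() and the reference acquisition.
 */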
/**
 * mem_cgroup_iter - iterate over memory cgroup hierarchy
 * @root: hierarchy root
 * @prev: previously returned memcg, NULL on first invocation
 * @reclaim: cookie for shared reclaim walks, NULL for full walks
 *
 * Returns references to children of the hierarchy below @root, or
 * @root itself, or %NULL after a full round-trip.
 *
 * Caller must pass the return value in @prev on subsequent
 * invocations for reference counting, or use mem_cgroup_iter_break()
 * to cancel a hierarchy walk before the round-trip is complete.
 *
 * Reclaimers can specify a node in @reclaim to divide up the memcgs
 * in the hierarchy among all concurrent reclaimers operating on the
 * same node.
 */
struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
				   struct mem_cgroup *prev,
				   struct mem_cgroup_reclaim_cookie *reclaim)
{
	struct mem_cgroup_reclaim_iter *iter;
	struct cgroup_subsys_state *css;
	struct mem_cgroup *pos;
	struct mem_cgroup *next;

	if (mem_cgroup_disabled())
		return NULL;

	if (!root)
		root = root_mem_cgroup;

	rcu_read_lock();
restart:
	next = NULL;

	if (reclaim) {
		int gen;
		int nid = reclaim->pgdat->node_id;

		iter = &root->nodeinfo[nid]->iter;
		gen = atomic_read(&iter->generation);

		/*
		 * On start, join the current reclaim iteration cycle.
		 * Exit when a concurrent walker completes it.
		 */
		if (!prev)
			reclaim->generation = gen;
		else if (reclaim->generation != gen)
			goto out_unlock;

		pos = READ_ONCE(iter->position);
	} else
		pos = prev;

	css = pos ? &pos->css : NULL;

	while ((css = css_next_descendant_pre(css, &root->css))) {
		/*
		 * Verify the css and acquire a reference. The root
		 * is provided by the caller, so we know it's alive
		 * and kicking, and don't take an extra reference.
		 */
		if (css == &root->css || css_tryget(css))
			break;
	}

	next = mem_cgroup_from_css(css);

	if (reclaim) {
		/*
		 * The position could have already been updated by a competing
		 * thread, so check that the value hasn't changed since we read
		 * it to avoid reclaiming from the same cgroup twice.
		 */
		if (cmpxchg(&iter->position, pos, next) != pos) {
			if (css && css != &root->css)
				css_put(css);
			goto restart;
		}

		if (!next) {
			atomic_inc(&iter->generation);

			/*
			 * Reclaimers share the hierarchy walk, and a
			 * new one might jump in right at the end of
			 * the hierarchy - make sure they see at least
			 * one group and restart from the beginning.
			 */
			if (!prev)
				goto restart;
		}
	}

out_unlock:
	rcu_read_unlock();
	if (prev && prev != root)
		css_put(&prev->css);

	return next;
}

/**
 * mem_cgroup_iter_break - abort a hierarchy walk prematurely
 * @root: hierarchy root
 * @prev: last visited hierarchy member as returned by mem_cgroup_iter()
 */
void mem_cgroup_iter_break(struct mem_cgroup *root,
			   struct mem_cgroup *prev)
{
	if (!root)
		root = root_mem_cgroup;
	if (prev && prev != root)
		css_put(&prev->css);
}

static void __invalidate_reclaim_iterators(struct mem_cgroup *from,
					struct mem_cgroup *dead_memcg)
{
	struct mem_cgroup_reclaim_iter *iter;
	struct mem_cgroup_per_node *mz;
	int nid;

	for_each_node(nid) {
		mz = from->nodeinfo[nid];
		iter = &mz->iter;
		cmpxchg(&iter->position, dead_memcg, NULL);
	}
}

static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
{
	struct mem_cgroup *memcg = dead_memcg;
	struct mem_cgroup *last;

	do {
		__invalidate_reclaim_iterators(memcg, dead_memcg);
		last = memcg;
	} while ((memcg = parent_mem_cgroup(memcg)));

	/*
	 * When cgroup1 non-hierarchy mode is used,
	 * parent_mem_cgroup() does not walk all the way up to the
	 * cgroup root (root_mem_cgroup). So we have to handle
	 * dead_memcg from cgroup root separately.
	 */
	if (!mem_cgroup_is_root(last))
		__invalidate_reclaim_iterators(root_mem_cgroup,
						dead_memcg);
}
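/*
 * Canonical walk over the iterator above (illustrative; memcontrol-v1.h
 * wraps this pattern in for_each_mem_cgroup_tree()):
 *
 *	struct mem_cgroup *iter = mem_cgroup_iter(root, NULL, NULL);
 *
 *	while (iter) {
 *		if (done) {
 *			mem_cgroup_iter_break(root, iter);
 *			break;
 *		}
 *		iter = mem_cgroup_iter(root, iter, NULL);
 *	}
 */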
/**
 * mem_cgroup_scan_tasks - iterate over tasks of a memory cgroup hierarchy
 * @memcg: hierarchy root
 * @fn: function to call for each task
 * @arg: argument passed to @fn
 *
 * This function iterates over tasks attached to @memcg or to any of its
 * descendants and calls @fn for each task. If @fn returns a non-zero
 * value, the function breaks the iteration loop. Otherwise, it will iterate
 * over all tasks and return 0.
 *
 * This function must not be called for the root memory cgroup.
 */
void mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
			   int (*fn)(struct task_struct *, void *), void *arg)
{
	struct mem_cgroup *iter;
	int ret = 0;
	int i = 0;

	BUG_ON(mem_cgroup_is_root(memcg));

	for_each_mem_cgroup_tree(iter, memcg) {
		struct css_task_iter it;
		struct task_struct *task;

		css_task_iter_start(&iter->css, CSS_TASK_ITER_PROCS, &it);
		while (!ret && (task = css_task_iter_next(&it))) {
			/* Avoid potential softlockup warning */
			if ((++i & 1023) == 0)
				cond_resched();
			ret = fn(task, arg);
		}
		css_task_iter_end(&it);
		if (ret) {
			mem_cgroup_iter_break(memcg, iter);
			break;
		}
	}
}

#ifdef CONFIG_DEBUG_VM
void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return;

	memcg = folio_memcg(folio);

	if (!memcg)
		VM_BUG_ON_FOLIO(!mem_cgroup_is_root(lruvec_memcg(lruvec)), folio);
	else
		VM_BUG_ON_FOLIO(lruvec_memcg(lruvec) != memcg, folio);
}
#endif

/**
 * folio_lruvec_lock - Lock the lruvec for a folio.
 * @folio: Pointer to the folio.
 *
 * These functions are safe to use under any of the following conditions:
 * - folio locked
 * - folio_test_lru false
 * - folio frozen (refcount of 0)
 *
 * Return: The lruvec this folio is on with its lock held.
 */
struct lruvec *folio_lruvec_lock(struct folio *folio)
{
	struct lruvec *lruvec = folio_lruvec(folio);

	spin_lock(&lruvec->lru_lock);
	lruvec_memcg_debug(lruvec, folio);

	return lruvec;
}

/**
 * folio_lruvec_lock_irq - Lock the lruvec for a folio.
 * @folio: Pointer to the folio.
 *
 * These functions are safe to use under any of the following conditions:
 * - folio locked
 * - folio_test_lru false
 * - folio frozen (refcount of 0)
 *
 * Return: The lruvec this folio is on with its lock held and interrupts
 * disabled.
 */
struct lruvec *folio_lruvec_lock_irq(struct folio *folio)
{
	struct lruvec *lruvec = folio_lruvec(folio);

	spin_lock_irq(&lruvec->lru_lock);
	lruvec_memcg_debug(lruvec, folio);

	return lruvec;
}

/**
 * folio_lruvec_lock_irqsave - Lock the lruvec for a folio.
 * @folio: Pointer to the folio.
 * @flags: Pointer to irqsave flags.
 *
 * These functions are safe to use under any of the following conditions:
 * - folio locked
 * - folio_test_lru false
 * - folio frozen (refcount of 0)
 *
 * Return: The lruvec this folio is on with its lock held and interrupts
 * disabled.
 */
struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio,
		unsigned long *flags)
{
	struct lruvec *lruvec = folio_lruvec(folio);

	spin_lock_irqsave(&lruvec->lru_lock, *flags);
	lruvec_memcg_debug(lruvec, folio);

	return lruvec;
}
/**
 * mem_cgroup_update_lru_size - account for adding or removing an lru page
 * @lruvec: mem_cgroup per zone lru vector
 * @lru: index of lru list the page is sitting on
 * @zid: zone id of the accounted pages
 * @nr_pages: positive when adding or negative when removing
 *
 * This function must be called under lru_lock, just before a page is added
 * to or just after a page is removed from an lru list.
 */
void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
				int zid, int nr_pages)
{
	struct mem_cgroup_per_node *mz;
	unsigned long *lru_size;
	long size;

	if (mem_cgroup_disabled())
		return;

	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	lru_size = &mz->lru_zone_size[zid][lru];

	if (nr_pages < 0)
		*lru_size += nr_pages;

	size = *lru_size;
	if (WARN_ONCE(size < 0,
		"%s(%p, %d, %d): lru_size %ld\n",
		__func__, lruvec, lru, nr_pages, size)) {
		VM_BUG_ON(1);
		*lru_size = 0;
	}

	if (nr_pages > 0)
		*lru_size += nr_pages;
}

/**
 * mem_cgroup_margin - calculate chargeable space of a memory cgroup
 * @memcg: the memory cgroup
 *
 * Returns the maximum amount of memory @memcg can be charged with, in
 * pages.
 */
static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
{
	unsigned long margin = 0;
	unsigned long count;
	unsigned long limit;

	count = page_counter_read(&memcg->memory);
	limit = READ_ONCE(memcg->memory.max);
	if (count < limit)
		margin = limit - count;

	if (do_memsw_account()) {
		count = page_counter_read(&memcg->memsw);
		limit = READ_ONCE(memcg->memsw.max);
		if (count < limit)
			margin = min(margin, limit - count);
		else
			margin = 0;
	}

	return margin;
}
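/*
 * Worked example for mem_cgroup_margin() (illustrative): with
 * memory.max == 1000 pages and usage == 900, the margin is 100; if
 * memsw accounting is on with memsw.max == 950 and memsw usage == 920,
 * the margin shrinks to min(100, 30) == 30 chargeable pages.
 */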
struct memory_stat {
	const char *name;
	unsigned int idx;
};

static const struct memory_stat memory_stats[] = {
	{ "anon",			NR_ANON_MAPPED			},
	{ "file",			NR_FILE_PAGES			},
	{ "kernel",			MEMCG_KMEM			},
	{ "kernel_stack",		NR_KERNEL_STACK_KB		},
	{ "pagetables",			NR_PAGETABLE			},
	{ "sec_pagetables",		NR_SECONDARY_PAGETABLE		},
	{ "percpu",			MEMCG_PERCPU_B			},
	{ "sock",			MEMCG_SOCK			},
	{ "vmalloc",			MEMCG_VMALLOC			},
	{ "shmem",			NR_SHMEM			},
#ifdef CONFIG_ZSWAP
	{ "zswap",			MEMCG_ZSWAP_B			},
	{ "zswapped",			MEMCG_ZSWAPPED			},
#endif
	{ "file_mapped",		NR_FILE_MAPPED			},
	{ "file_dirty",			NR_FILE_DIRTY			},
	{ "file_writeback",		NR_WRITEBACK			},
#ifdef CONFIG_SWAP
	{ "swapcached",			NR_SWAPCACHE			},
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	{ "anon_thp",			NR_ANON_THPS			},
	{ "file_thp",			NR_FILE_THPS			},
	{ "shmem_thp",			NR_SHMEM_THPS			},
#endif
	{ "inactive_anon",		NR_INACTIVE_ANON		},
	{ "active_anon",		NR_ACTIVE_ANON			},
	{ "inactive_file",		NR_INACTIVE_FILE		},
	{ "active_file",		NR_ACTIVE_FILE			},
	{ "unevictable",		NR_UNEVICTABLE			},
	{ "slab_reclaimable",		NR_SLAB_RECLAIMABLE_B		},
	{ "slab_unreclaimable",		NR_SLAB_UNRECLAIMABLE_B		},
#ifdef CONFIG_HUGETLB_PAGE
	{ "hugetlb",			NR_HUGETLB			},
#endif

	/* The memory events */
	{ "workingset_refault_anon",	WORKINGSET_REFAULT_ANON		},
	{ "workingset_refault_file",	WORKINGSET_REFAULT_FILE		},
	{ "workingset_activate_anon",	WORKINGSET_ACTIVATE_ANON	},
	{ "workingset_activate_file",	WORKINGSET_ACTIVATE_FILE	},
	{ "workingset_restore_anon",	WORKINGSET_RESTORE_ANON		},
	{ "workingset_restore_file",	WORKINGSET_RESTORE_FILE		},
	{ "workingset_nodereclaim",	WORKINGSET_NODERECLAIM		},

	{ "pgdemote_kswapd",		PGDEMOTE_KSWAPD			},
	{ "pgdemote_direct",		PGDEMOTE_DIRECT			},
	{ "pgdemote_khugepaged",	PGDEMOTE_KHUGEPAGED		},
#ifdef CONFIG_NUMA_BALANCING
	{ "pgpromote_success",		PGPROMOTE_SUCCESS		},
#endif
};

/* The actual unit of the state item, not the same as the output unit */
static int memcg_page_state_unit(int item)
{
	switch (item) {
	case MEMCG_PERCPU_B:
	case MEMCG_ZSWAP_B:
	case NR_SLAB_RECLAIMABLE_B:
	case NR_SLAB_UNRECLAIMABLE_B:
		return 1;
	case NR_KERNEL_STACK_KB:
		return SZ_1K;
	default:
		return PAGE_SIZE;
	}
}

/* Translate stat items to the correct unit for memory.stat output */
static int memcg_page_state_output_unit(int item)
{
	/*
	 * Workingset state is actually in pages, but we export it to userspace
	 * as a scalar count of events, so special case it here.
	 *
	 * Demotion and promotion activities are exported in pages, consistent
	 * with their global counterparts.
	 */
	switch (item) {
	case WORKINGSET_REFAULT_ANON:
	case WORKINGSET_REFAULT_FILE:
	case WORKINGSET_ACTIVATE_ANON:
	case WORKINGSET_ACTIVATE_FILE:
	case WORKINGSET_RESTORE_ANON:
	case WORKINGSET_RESTORE_FILE:
	case WORKINGSET_NODERECLAIM:
	case PGDEMOTE_KSWAPD:
	case PGDEMOTE_DIRECT:
	case PGDEMOTE_KHUGEPAGED:
#ifdef CONFIG_NUMA_BALANCING
	case PGPROMOTE_SUCCESS:
#endif
		return 1;
	default:
		return memcg_page_state_unit(item);
	}
}

unsigned long memcg_page_state_output(struct mem_cgroup *memcg, int item)
{
	return memcg_page_state(memcg, item) *
		memcg_page_state_output_unit(item);
}

unsigned long memcg_page_state_local_output(struct mem_cgroup *memcg, int item)
{
	return memcg_page_state_local(memcg, item) *
		memcg_page_state_output_unit(item);
}
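/*
 * Worked example for the unit helpers above (illustrative):
 * NR_KERNEL_STACK_KB is stored in KiB, so memcg_page_state_output()
 * multiplies by SZ_1K and memory.stat reports bytes, while
 * WORKINGSET_REFAULT_FILE is stored in pages but reported as a raw
 * event count, hence its output unit of 1.
 */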
#ifdef CONFIG_HUGETLB_PAGE
static bool memcg_accounts_hugetlb(void)
{
	return cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_HUGETLB_ACCOUNTING;
}
#else /* CONFIG_HUGETLB_PAGE */
static bool memcg_accounts_hugetlb(void)
{
	return false;
}
#endif /* CONFIG_HUGETLB_PAGE */

static void memcg_stat_format(struct mem_cgroup *memcg, struct seq_buf *s)
{
	int i;

	/*
	 * Provide statistics on the state of the memory subsystem as
	 * well as cumulative event counters that show past behavior.
	 *
	 * This list is ordered following a combination of these gradients:
	 * 1) generic big picture -> specifics and details
	 * 2) reflecting userspace activity -> reflecting kernel heuristics
	 *
	 * Current memory state:
	 */
	mem_cgroup_flush_stats(memcg);

	for (i = 0; i < ARRAY_SIZE(memory_stats); i++) {
		u64 size;

#ifdef CONFIG_HUGETLB_PAGE
		if (unlikely(memory_stats[i].idx == NR_HUGETLB) &&
		    !memcg_accounts_hugetlb())
			continue;
#endif
		size = memcg_page_state_output(memcg, memory_stats[i].idx);
		seq_buf_printf(s, "%s %llu\n", memory_stats[i].name, size);

		if (unlikely(memory_stats[i].idx == NR_SLAB_UNRECLAIMABLE_B)) {
			size += memcg_page_state_output(memcg,
							NR_SLAB_RECLAIMABLE_B);
			seq_buf_printf(s, "slab %llu\n", size);
		}
	}

	/* Accumulated memory events */
	seq_buf_printf(s, "pgscan %lu\n",
		       memcg_events(memcg, PGSCAN_KSWAPD) +
		       memcg_events(memcg, PGSCAN_DIRECT) +
		       memcg_events(memcg, PGSCAN_KHUGEPAGED));
	seq_buf_printf(s, "pgsteal %lu\n",
		       memcg_events(memcg, PGSTEAL_KSWAPD) +
		       memcg_events(memcg, PGSTEAL_DIRECT) +
		       memcg_events(memcg, PGSTEAL_KHUGEPAGED));

	for (i = 0; i < ARRAY_SIZE(memcg_vm_event_stat); i++) {
#ifdef CONFIG_MEMCG_V1
		if (memcg_vm_event_stat[i] == PGPGIN ||
		    memcg_vm_event_stat[i] == PGPGOUT)
			continue;
#endif
		seq_buf_printf(s, "%s %lu\n",
			       vm_event_name(memcg_vm_event_stat[i]),
			       memcg_events(memcg, memcg_vm_event_stat[i]));
	}
}

static void memory_stat_format(struct mem_cgroup *memcg, struct seq_buf *s)
{
	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
		memcg_stat_format(memcg, s);
	else
		memcg1_stat_format(memcg, s);
	if (seq_buf_has_overflowed(s))
		pr_warn("%s: Warning, stat buffer overflow, please report\n", __func__);
}

/**
 * mem_cgroup_print_oom_context: Print OOM information relevant to
 * memory controller.
 * @memcg: The memory cgroup that went over limit
 * @p: Task that is going to be killed
 *
 * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
 * enabled
 */
void mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
{
	rcu_read_lock();

	if (memcg) {
		pr_cont(",oom_memcg=");
		pr_cont_cgroup_path(memcg->css.cgroup);
	} else
		pr_cont(",global_oom");
	if (p) {
		pr_cont(",task_memcg=");
		pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
	}
	rcu_read_unlock();
}
/**
 * mem_cgroup_print_oom_meminfo: Print OOM memory information relevant to
 * memory controller.
 * @memcg: The memory cgroup that went over limit
 */
void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
{
	/* Use a static buffer, for the caller is holding oom_lock. */
	static char buf[SEQ_BUF_SIZE];
	struct seq_buf s;

	lockdep_assert_held(&oom_lock);

	pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n",
		K((u64)page_counter_read(&memcg->memory)),
		K((u64)READ_ONCE(memcg->memory.max)), memcg->memory.failcnt);
	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
		pr_info("swap: usage %llukB, limit %llukB, failcnt %lu\n",
			K((u64)page_counter_read(&memcg->swap)),
			K((u64)READ_ONCE(memcg->swap.max)), memcg->swap.failcnt);
#ifdef CONFIG_MEMCG_V1
	else {
		pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %lu\n",
			K((u64)page_counter_read(&memcg->memsw)),
			K((u64)memcg->memsw.max), memcg->memsw.failcnt);
		pr_info("kmem: usage %llukB, limit %llukB, failcnt %lu\n",
			K((u64)page_counter_read(&memcg->kmem)),
			K((u64)memcg->kmem.max), memcg->kmem.failcnt);
	}
#endif

	pr_info("Memory cgroup stats for ");
	pr_cont_cgroup_path(memcg->css.cgroup);
	pr_cont(":");
	seq_buf_init(&s, buf, SEQ_BUF_SIZE);
	memory_stat_format(memcg, &s);
	seq_buf_do_printk(&s, KERN_INFO);
}

/*
 * Return the memory (and swap, if configured) limit for a memcg.
 */
unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
{
	unsigned long max = READ_ONCE(memcg->memory.max);

	if (do_memsw_account()) {
		if (mem_cgroup_swappiness(memcg)) {
			/* Calculate swap excess capacity from memsw limit */
			unsigned long swap = READ_ONCE(memcg->memsw.max) - max;

			max += min(swap, (unsigned long)total_swap_pages);
		}
	} else {
		if (mem_cgroup_swappiness(memcg))
			max += min(READ_ONCE(memcg->swap.max),
				   (unsigned long)total_swap_pages);
	}
	return max;
}

unsigned long mem_cgroup_size(struct mem_cgroup *memcg)
{
	return page_counter_read(&memcg->memory);
}

static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
				     int order)
{
	struct oom_control oc = {
		.zonelist = NULL,
		.nodemask = NULL,
		.memcg = memcg,
		.gfp_mask = gfp_mask,
		.order = order,
	};
	bool ret = true;

	if (mutex_lock_killable(&oom_lock))
		return true;

	if (mem_cgroup_margin(memcg) >= (1 << order))
		goto unlock;

	/*
	 * A few threads which were not waiting at mutex_lock_killable() can
	 * fail to bail out. Therefore, check again after holding oom_lock.
	 */
	ret = task_is_dying() || out_of_memory(&oc);

unlock:
	mutex_unlock(&oom_lock);
	return ret;
}
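/*
 * Worked example for mem_cgroup_get_max() (illustrative): on cgroup2
 * with memory.max == 1G, swap.max == 512M, ample configured swap and a
 * nonzero swappiness, the theoretical OOM footprint ceiling is
 * 1G + min(512M, total_swap_pages) == 1.5G worth of pages.
 */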
/*
 * Returns true if successfully killed one or more processes. Though in some
 * corner cases it can return true even without killing any process.
 */
static bool mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
{
	bool locked, ret;

	if (order > PAGE_ALLOC_COSTLY_ORDER)
		return false;

	memcg_memory_event(memcg, MEMCG_OOM);

	if (!memcg1_oom_prepare(memcg, &locked))
		return false;

	ret = mem_cgroup_out_of_memory(memcg, mask, order);

	memcg1_oom_finish(memcg, locked);

	return ret;
}

/**
 * mem_cgroup_get_oom_group - get a memory cgroup to clean up after OOM
 * @victim: task to be killed by the OOM killer
 * @oom_domain: memcg in case of memcg OOM, NULL in case of system-wide OOM
 *
 * Returns a pointer to a memory cgroup, which has to be cleaned up
 * by killing all belonging OOM-killable tasks.
 *
 * Caller has to call mem_cgroup_put() on the returned non-NULL memcg.
 */
struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim,
					    struct mem_cgroup *oom_domain)
{
	struct mem_cgroup *oom_group = NULL;
	struct mem_cgroup *memcg;

	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
		return NULL;

	if (!oom_domain)
		oom_domain = root_mem_cgroup;

	rcu_read_lock();

	memcg = mem_cgroup_from_task(victim);
	if (mem_cgroup_is_root(memcg))
		goto out;

	/*
	 * If the victim task has been asynchronously moved to a different
	 * memory cgroup, we might end up killing tasks outside oom_domain.
	 * In this case it's better to ignore memory.group.oom.
	 */
	if (unlikely(!mem_cgroup_is_descendant(memcg, oom_domain)))
		goto out;

	/*
	 * Traverse the memory cgroup hierarchy from the victim task's
	 * cgroup up to the OOMing cgroup (or root) to find the
	 * highest-level memory cgroup with oom.group set.
	 */
	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
		if (READ_ONCE(memcg->oom_group))
			oom_group = memcg;

		if (memcg == oom_domain)
			break;
	}

	if (oom_group)
		css_get(&oom_group->css);
out:
	rcu_read_unlock();

	return oom_group;
}

void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
{
	pr_info("Tasks in ");
	pr_cont_cgroup_path(memcg->css.cgroup);
	pr_cont(" are going to be killed due to memory.oom.group set\n");
}

struct memcg_stock_pcp {
	local_lock_t stock_lock;
	struct mem_cgroup *cached; /* this is never the root cgroup */
	unsigned int nr_pages;

	struct obj_cgroup *cached_objcg;
	struct pglist_data *cached_pgdat;
	unsigned int nr_bytes;
	int nr_slab_reclaimable_b;
	int nr_slab_unreclaimable_b;

	struct work_struct work;
	unsigned long flags;
#define FLUSHING_CACHED_CHARGE	0
};
static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock) = {
	.stock_lock = INIT_LOCAL_LOCK(stock_lock),
};
static DEFINE_MUTEX(percpu_charge_mutex);

static struct obj_cgroup *drain_obj_stock(struct memcg_stock_pcp *stock);
static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
				     struct mem_cgroup *root_memcg);
/**
 * consume_stock: Try to consume stocked charge on this cpu.
 * @memcg: memcg to consume from.
 * @nr_pages: how many pages to charge.
 *
 * The charges will only happen if @memcg matches the current cpu's memcg
 * stock, and at least @nr_pages are available in that stock. Failure to
 * service an allocation will refill the stock.
 *
 * Returns true if successful, false otherwise.
 */
static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
{
	struct memcg_stock_pcp *stock;
	unsigned int stock_pages;
	unsigned long flags;
	bool ret = false;

	if (nr_pages > MEMCG_CHARGE_BATCH)
		return ret;

	local_lock_irqsave(&memcg_stock.stock_lock, flags);

	stock = this_cpu_ptr(&memcg_stock);
	stock_pages = READ_ONCE(stock->nr_pages);
	if (memcg == READ_ONCE(stock->cached) && stock_pages >= nr_pages) {
		WRITE_ONCE(stock->nr_pages, stock_pages - nr_pages);
		ret = true;
	}

	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);

	return ret;
}

/*
 * Return the stocks cached in percpu and reset the cached information.
 */
static void drain_stock(struct memcg_stock_pcp *stock)
{
	unsigned int stock_pages = READ_ONCE(stock->nr_pages);
	struct mem_cgroup *old = READ_ONCE(stock->cached);

	if (!old)
		return;

	if (stock_pages) {
		page_counter_uncharge(&old->memory, stock_pages);
		if (do_memsw_account())
			page_counter_uncharge(&old->memsw, stock_pages);

		WRITE_ONCE(stock->nr_pages, 0);
	}

	css_put(&old->css);
	WRITE_ONCE(stock->cached, NULL);
}

static void drain_local_stock(struct work_struct *dummy)
{
	struct memcg_stock_pcp *stock;
	struct obj_cgroup *old = NULL;
	unsigned long flags;

	/*
	 * The only protection from cpu hotplug (memcg_hotplug_cpu_dead) vs.
	 * drain_stock races is that we always operate on local CPU stock
	 * here with IRQ disabled
	 */
	local_lock_irqsave(&memcg_stock.stock_lock, flags);

	stock = this_cpu_ptr(&memcg_stock);
	old = drain_obj_stock(stock);
	drain_stock(stock);
	clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);

	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
	obj_cgroup_put(old);
}

/*
 * Cache charges to the local per-CPU area.
 * These will be consumed by the consume_stock() function, later.
 */
static void __refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
{
	struct memcg_stock_pcp *stock;
	unsigned int stock_pages;

	stock = this_cpu_ptr(&memcg_stock);
	if (READ_ONCE(stock->cached) != memcg) { /* reset if necessary */
		drain_stock(stock);
		css_get(&memcg->css);
		WRITE_ONCE(stock->cached, memcg);
	}
	stock_pages = READ_ONCE(stock->nr_pages) + nr_pages;
	WRITE_ONCE(stock->nr_pages, stock_pages);

	if (stock_pages > MEMCG_CHARGE_BATCH)
		drain_stock(stock);
}

static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
{
	unsigned long flags;

	local_lock_irqsave(&memcg_stock.stock_lock, flags);
	__refill_stock(memcg, nr_pages);
	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
}
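/*
 * Illustrative lifecycle of the per-CPU stock above: the charge path
 * overcharges the page_counter by up to MEMCG_CHARGE_BATCH pages and
 * parks the surplus here via refill_stock(); subsequent small charges
 * for the same memcg on this CPU are then served by consume_stock()
 * without touching the shared page_counter atomics.
 */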
/*
 * Drain all per-CPU charge caches for the given root_memcg and the
 * subtree of the hierarchy under it.
 */
void drain_all_stock(struct mem_cgroup *root_memcg)
{
	int cpu, curcpu;

	/* If someone's already draining, avoid running more workers. */
	if (!mutex_trylock(&percpu_charge_mutex))
		return;
	/*
	 * Notify other cpus that system-wide "drain" is running
	 * We do not care about races with the cpu hotplug because cpu down
	 * as well as workers from this path always operate on the local
	 * per-cpu data. CPU up doesn't touch memcg_stock at all.
	 */
	migrate_disable();
	curcpu = smp_processor_id();
	for_each_online_cpu(cpu) {
		struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
		struct mem_cgroup *memcg;
		bool flush = false;

		rcu_read_lock();
		memcg = READ_ONCE(stock->cached);
		if (memcg && READ_ONCE(stock->nr_pages) &&
		    mem_cgroup_is_descendant(memcg, root_memcg))
			flush = true;
		else if (obj_stock_flush_required(stock, root_memcg))
			flush = true;
		rcu_read_unlock();

		if (flush &&
		    !test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
			if (cpu == curcpu)
				drain_local_stock(&stock->work);
			else if (!cpu_is_isolated(cpu))
				schedule_work_on(cpu, &stock->work);
		}
	}
	migrate_enable();
	mutex_unlock(&percpu_charge_mutex);
}

static int memcg_hotplug_cpu_dead(unsigned int cpu)
{
	struct memcg_stock_pcp *stock;

	stock = &per_cpu(memcg_stock, cpu);
	drain_stock(stock);

	return 0;
}

static unsigned long reclaim_high(struct mem_cgroup *memcg,
				  unsigned int nr_pages,
				  gfp_t gfp_mask)
{
	unsigned long nr_reclaimed = 0;

	do {
		unsigned long pflags;

		if (page_counter_read(&memcg->memory) <=
		    READ_ONCE(memcg->memory.high))
			continue;

		memcg_memory_event(memcg, MEMCG_HIGH);

		psi_memstall_enter(&pflags);
		nr_reclaimed += try_to_free_mem_cgroup_pages(memcg, nr_pages,
							     gfp_mask,
							     MEMCG_RECLAIM_MAY_SWAP,
							     NULL);
		psi_memstall_leave(&pflags);
	} while ((memcg = parent_mem_cgroup(memcg)) &&
		 !mem_cgroup_is_root(memcg));

	return nr_reclaimed;
}

static void high_work_func(struct work_struct *work)
{
	struct mem_cgroup *memcg;

	memcg = container_of(work, struct mem_cgroup, high_work);
	reclaim_high(memcg, MEMCG_CHARGE_BATCH, GFP_KERNEL);
}

/*
 * Clamp the maximum sleep time per allocation batch to 2 seconds. This is
 * enough to still cause a significant slowdown in most cases, while still
 * allowing diagnostics and tracing to proceed without becoming stuck.
 */
#define MEMCG_MAX_HIGH_DELAY_JIFFIES (2UL*HZ)
For 1988 * example, with a high of 100 megabytes: 1989 * 1990 * +-------+------------------------+ 1991 * | usage | time to allocate in ms | 1992 * +-------+------------------------+ 1993 * | 100M | 0 | 1994 * | 101M | 6 | 1995 * | 102M | 25 | 1996 * | 103M | 57 | 1997 * | 104M | 102 | 1998 * | 105M | 159 | 1999 * | 106M | 230 | 2000 * | 107M | 313 | 2001 * | 108M | 409 | 2002 * | 109M | 518 | 2003 * | 110M | 639 | 2004 * | 111M | 774 | 2005 * | 112M | 921 | 2006 * | 113M | 1081 | 2007 * | 114M | 1254 | 2008 * | 115M | 1439 | 2009 * | 116M | 1638 | 2010 * | 117M | 1849 | 2011 * | 118M | 2000 | 2012 * | 119M | 2000 | 2013 * | 120M | 2000 | 2014 * +-------+------------------------+ 2015 */ 2016 #define MEMCG_DELAY_PRECISION_SHIFT 20 2017 #define MEMCG_DELAY_SCALING_SHIFT 14 2018 2019 static u64 calculate_overage(unsigned long usage, unsigned long high) 2020 { 2021 u64 overage; 2022 2023 if (usage <= high) 2024 return 0; 2025 2026 /* 2027 * Prevent division by 0 in overage calculation by acting as if 2028 * it was a threshold of 1 page 2029 */ 2030 high = max(high, 1UL); 2031 2032 overage = usage - high; 2033 overage <<= MEMCG_DELAY_PRECISION_SHIFT; 2034 return div64_u64(overage, high); 2035 } 2036 2037 static u64 mem_find_max_overage(struct mem_cgroup *memcg) 2038 { 2039 u64 overage, max_overage = 0; 2040 2041 do { 2042 overage = calculate_overage(page_counter_read(&memcg->memory), 2043 READ_ONCE(memcg->memory.high)); 2044 max_overage = max(overage, max_overage); 2045 } while ((memcg = parent_mem_cgroup(memcg)) && 2046 !mem_cgroup_is_root(memcg)); 2047 2048 return max_overage; 2049 } 2050 2051 static u64 swap_find_max_overage(struct mem_cgroup *memcg) 2052 { 2053 u64 overage, max_overage = 0; 2054 2055 do { 2056 overage = calculate_overage(page_counter_read(&memcg->swap), 2057 READ_ONCE(memcg->swap.high)); 2058 if (overage) 2059 memcg_memory_event(memcg, MEMCG_SWAP_HIGH); 2060 max_overage = max(overage, max_overage); 2061 } while ((memcg = parent_mem_cgroup(memcg)) && 2062 !mem_cgroup_is_root(memcg)); 2063 2064 return max_overage; 2065 } 2066 2067 /* 2068 * Get the number of jiffies that we should penalise a mischievous cgroup which 2069 * is exceeding its memory.high by checking both it and its ancestors. 2070 */ 2071 static unsigned long calculate_high_delay(struct mem_cgroup *memcg, 2072 unsigned int nr_pages, 2073 u64 max_overage) 2074 { 2075 unsigned long penalty_jiffies; 2076 2077 if (!max_overage) 2078 return 0; 2079 2080 /* 2081 * We use overage compared to memory.high to calculate the number of 2082 * jiffies to sleep (penalty_jiffies). Ideally this value should be 2083 * fairly lenient on small overages, and increasingly harsh when the 2084 * memcg in question makes it clear that it has no intention of stopping 2085 * its crazy behaviour, so we exponentially increase the delay based on 2086 * overage amount. 2087 */ 2088 penalty_jiffies = max_overage * max_overage * HZ; 2089 penalty_jiffies >>= MEMCG_DELAY_PRECISION_SHIFT; 2090 penalty_jiffies >>= MEMCG_DELAY_SCALING_SHIFT; 2091 2092 /* 2093 * Factor in the task's own contribution to the overage, such that four 2094 * N-sized allocations are throttled approximately the same as one 2095 * 4N-sized allocation. 2096 * 2097 * MEMCG_CHARGE_BATCH pages is nominal, so work out how much smaller or 2098 * larger the current charge patch is than that. 2099 */ 2100 return penalty_jiffies * nr_pages / MEMCG_CHARGE_BATCH; 2101 } 2102 2103 /* 2104 * Reclaims memory over the high limit. 
Called directly from 2105 * try_charge() (context permitting), as well as from the userland 2106 * return path where reclaim is always able to block. 2107 */ 2108 void mem_cgroup_handle_over_high(gfp_t gfp_mask) 2109 { 2110 unsigned long penalty_jiffies; 2111 unsigned long pflags; 2112 unsigned long nr_reclaimed; 2113 unsigned int nr_pages = current->memcg_nr_pages_over_high; 2114 int nr_retries = MAX_RECLAIM_RETRIES; 2115 struct mem_cgroup *memcg; 2116 bool in_retry = false; 2117 2118 if (likely(!nr_pages)) 2119 return; 2120 2121 memcg = get_mem_cgroup_from_mm(current->mm); 2122 current->memcg_nr_pages_over_high = 0; 2123 2124 retry_reclaim: 2125 /* 2126 * Bail if the task is already exiting. Unlike memory.max, 2127 * memory.high enforcement isn't as strict, and there is no 2128 * OOM killer involved, which means the excess could already 2129 * be much bigger (and still growing) than it could for 2130 * memory.max; the dying task could get stuck in fruitless 2131 * reclaim for a long time, which isn't desirable. 2132 */ 2133 if (task_is_dying()) 2134 goto out; 2135 2136 /* 2137 * The allocating task should reclaim at least the batch size, but for 2138 * subsequent retries we only want to do what's necessary to prevent oom 2139 * or breaching resource isolation. 2140 * 2141 * This is distinct from memory.max or page allocator behaviour because 2142 * memory.high is currently batched, whereas memory.max and the page 2143 * allocator run every time an allocation is made. 2144 */ 2145 nr_reclaimed = reclaim_high(memcg, 2146 in_retry ? SWAP_CLUSTER_MAX : nr_pages, 2147 gfp_mask); 2148 2149 /* 2150 * memory.high is breached and reclaim is unable to keep up. Throttle 2151 * allocators proactively to slow down excessive growth. 2152 */ 2153 penalty_jiffies = calculate_high_delay(memcg, nr_pages, 2154 mem_find_max_overage(memcg)); 2155 2156 penalty_jiffies += calculate_high_delay(memcg, nr_pages, 2157 swap_find_max_overage(memcg)); 2158 2159 /* 2160 * Clamp the max delay per usermode return so as to still keep the 2161 * application moving forwards and also permit diagnostics, albeit 2162 * extremely slowly. 2163 */ 2164 penalty_jiffies = min(penalty_jiffies, MEMCG_MAX_HIGH_DELAY_JIFFIES); 2165 2166 /* 2167 * Don't sleep if the amount of jiffies this memcg owes us is so low 2168 * that it's not even worth doing, in an attempt to be nice to those who 2169 * go only a small amount over their memory.high value and maybe haven't 2170 * been aggressively reclaimed enough yet. 2171 */ 2172 if (penalty_jiffies <= HZ / 100) 2173 goto out; 2174 2175 /* 2176 * If reclaim is making forward progress but we're still over 2177 * memory.high, we want to encourage that rather than doing allocator 2178 * throttling. 2179 */ 2180 if (nr_reclaimed || nr_retries--) { 2181 in_retry = true; 2182 goto retry_reclaim; 2183 } 2184 2185 /* 2186 * Reclaim didn't manage to push usage below the limit, slow 2187 * this allocating task down. 2188 * 2189 * If we exit early, we're guaranteed to die (since 2190 * schedule_timeout_killable sets TASK_KILLABLE). This means we don't 2191 * need to account for any ill-begotten jiffies to pay them off later. 
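	 *
	 * As a rough worked example (not taken from the code, and assuming
	 * HZ=1000): with usage at 110M against a 100M high,
	 * calculate_overage() yields
	 *
	 *	overage = ((110M - 100M) << MEMCG_DELAY_PRECISION_SHIFT) / 100M
	 *		~= 104858
	 *
	 * and calculate_high_delay() turns that into
	 *
	 *	penalty_jiffies = overage * overage * HZ
	 *				>> MEMCG_DELAY_PRECISION_SHIFT
	 *				>> MEMCG_DELAY_SCALING_SHIFT
	 *			~= 640 jiffies ~= 640ms,
	 *
	 * which matches the ~639ms row of the table above, before the
	 * nr_pages / MEMCG_CHARGE_BATCH scaling and the 2 second clamp are
	 * applied.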
2192 */ 2193 psi_memstall_enter(&pflags); 2194 schedule_timeout_killable(penalty_jiffies); 2195 psi_memstall_leave(&pflags); 2196 2197 out: 2198 css_put(&memcg->css); 2199 } 2200 2201 int try_charge_memcg(struct mem_cgroup *memcg, gfp_t gfp_mask, 2202 unsigned int nr_pages) 2203 { 2204 unsigned int batch = max(MEMCG_CHARGE_BATCH, nr_pages); 2205 int nr_retries = MAX_RECLAIM_RETRIES; 2206 struct mem_cgroup *mem_over_limit; 2207 struct page_counter *counter; 2208 unsigned long nr_reclaimed; 2209 bool passed_oom = false; 2210 unsigned int reclaim_options = MEMCG_RECLAIM_MAY_SWAP; 2211 bool drained = false; 2212 bool raised_max_event = false; 2213 unsigned long pflags; 2214 2215 retry: 2216 if (consume_stock(memcg, nr_pages)) 2217 return 0; 2218 2219 if (!do_memsw_account() || 2220 page_counter_try_charge(&memcg->memsw, batch, &counter)) { 2221 if (page_counter_try_charge(&memcg->memory, batch, &counter)) 2222 goto done_restock; 2223 if (do_memsw_account()) 2224 page_counter_uncharge(&memcg->memsw, batch); 2225 mem_over_limit = mem_cgroup_from_counter(counter, memory); 2226 } else { 2227 mem_over_limit = mem_cgroup_from_counter(counter, memsw); 2228 reclaim_options &= ~MEMCG_RECLAIM_MAY_SWAP; 2229 } 2230 2231 if (batch > nr_pages) { 2232 batch = nr_pages; 2233 goto retry; 2234 } 2235 2236 /* 2237 * Prevent unbounded recursion when reclaim operations need to 2238 * allocate memory. This might exceed the limits temporarily, 2239 * but we prefer facilitating memory reclaim and getting back 2240 * under the limit over triggering OOM kills in these cases. 2241 */ 2242 if (unlikely(current->flags & PF_MEMALLOC)) 2243 goto force; 2244 2245 if (unlikely(task_in_memcg_oom(current))) 2246 goto nomem; 2247 2248 if (!gfpflags_allow_blocking(gfp_mask)) 2249 goto nomem; 2250 2251 memcg_memory_event(mem_over_limit, MEMCG_MAX); 2252 raised_max_event = true; 2253 2254 psi_memstall_enter(&pflags); 2255 nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages, 2256 gfp_mask, reclaim_options, NULL); 2257 psi_memstall_leave(&pflags); 2258 2259 if (mem_cgroup_margin(mem_over_limit) >= nr_pages) 2260 goto retry; 2261 2262 if (!drained) { 2263 drain_all_stock(mem_over_limit); 2264 drained = true; 2265 goto retry; 2266 } 2267 2268 if (gfp_mask & __GFP_NORETRY) 2269 goto nomem; 2270 /* 2271 * Even though the limit is exceeded at this point, reclaim 2272 * may have been able to free some pages. Retry the charge 2273 * before killing the task. 2274 * 2275 * Only for regular pages, though: huge pages are rather 2276 * unlikely to succeed so close to the limit, and we fall back 2277 * to regular pages anyway in case of failure. 2278 */ 2279 if (nr_reclaimed && nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER)) 2280 goto retry; 2281 2282 if (nr_retries--) 2283 goto retry; 2284 2285 if (gfp_mask & __GFP_RETRY_MAYFAIL) 2286 goto nomem; 2287 2288 /* Avoid endless loop for tasks bypassed by the oom killer */ 2289 if (passed_oom && task_is_dying()) 2290 goto nomem; 2291 2292 /* 2293 * keep retrying as long as the memcg oom killer is able to make 2294 * a forward progress or bypass the charge if the oom killer 2295 * couldn't make any progress. 2296 */ 2297 if (mem_cgroup_oom(mem_over_limit, gfp_mask, 2298 get_order(nr_pages * PAGE_SIZE))) { 2299 passed_oom = true; 2300 nr_retries = MAX_RECLAIM_RETRIES; 2301 goto retry; 2302 } 2303 nomem: 2304 /* 2305 * Memcg doesn't have a dedicated reserve for atomic 2306 * allocations. 
But like the global atomic pool, we need to 2307 * put the burden of reclaim on regular allocation requests 2308 * and let these go through as privileged allocations. 2309 */ 2310 if (!(gfp_mask & (__GFP_NOFAIL | __GFP_HIGH))) 2311 return -ENOMEM; 2312 force: 2313 /* 2314 * If the allocation has to be enforced, don't forget to raise 2315 * a MEMCG_MAX event. 2316 */ 2317 if (!raised_max_event) 2318 memcg_memory_event(mem_over_limit, MEMCG_MAX); 2319 2320 /* 2321 * The allocation either can't fail or will lead to more memory 2322 * being freed very soon. Allow memory usage go over the limit 2323 * temporarily by force charging it. 2324 */ 2325 page_counter_charge(&memcg->memory, nr_pages); 2326 if (do_memsw_account()) 2327 page_counter_charge(&memcg->memsw, nr_pages); 2328 2329 return 0; 2330 2331 done_restock: 2332 if (batch > nr_pages) 2333 refill_stock(memcg, batch - nr_pages); 2334 2335 /* 2336 * If the hierarchy is above the normal consumption range, schedule 2337 * reclaim on returning to userland. We can perform reclaim here 2338 * if __GFP_RECLAIM but let's always punt for simplicity and so that 2339 * GFP_KERNEL can consistently be used during reclaim. @memcg is 2340 * not recorded as it most likely matches current's and won't 2341 * change in the meantime. As high limit is checked again before 2342 * reclaim, the cost of mismatch is negligible. 2343 */ 2344 do { 2345 bool mem_high, swap_high; 2346 2347 mem_high = page_counter_read(&memcg->memory) > 2348 READ_ONCE(memcg->memory.high); 2349 swap_high = page_counter_read(&memcg->swap) > 2350 READ_ONCE(memcg->swap.high); 2351 2352 /* Don't bother a random interrupted task */ 2353 if (!in_task()) { 2354 if (mem_high) { 2355 schedule_work(&memcg->high_work); 2356 break; 2357 } 2358 continue; 2359 } 2360 2361 if (mem_high || swap_high) { 2362 /* 2363 * The allocating tasks in this cgroup will need to do 2364 * reclaim or be throttled to prevent further growth 2365 * of the memory or swap footprints. 2366 * 2367 * Target some best-effort fairness between the tasks, 2368 * and distribute reclaim work and delay penalties 2369 * based on how much each task is actually allocating. 2370 */ 2371 current->memcg_nr_pages_over_high += batch; 2372 set_notify_resume(current); 2373 break; 2374 } 2375 } while ((memcg = parent_mem_cgroup(memcg))); 2376 2377 /* 2378 * Reclaim is set up above to be called from the userland 2379 * return path. But also attempt synchronous reclaim to avoid 2380 * excessive overrun while the task is still inside the 2381 * kernel. If this is successful, the return path will see it 2382 * when it rechecks the overage and simply bail out. 
	 */
	if (current->memcg_nr_pages_over_high > MEMCG_CHARGE_BATCH &&
	    !(current->flags & PF_MEMALLOC) &&
	    gfpflags_allow_blocking(gfp_mask))
		mem_cgroup_handle_over_high(gfp_mask);
	return 0;
}

static void commit_charge(struct folio *folio, struct mem_cgroup *memcg)
{
	VM_BUG_ON_FOLIO(folio_memcg_charged(folio), folio);
	/*
	 * Any of the following ensures page's memcg stability:
	 *
	 * - the page lock
	 * - LRU isolation
	 * - exclusive reference
	 */
	folio->memcg_data = (unsigned long)memcg;
}

static inline void __mod_objcg_mlstate(struct obj_cgroup *objcg,
				       struct pglist_data *pgdat,
				       enum node_stat_item idx, int nr)
{
	struct mem_cgroup *memcg;
	struct lruvec *lruvec;

	rcu_read_lock();
	memcg = obj_cgroup_memcg(objcg);
	lruvec = mem_cgroup_lruvec(memcg, pgdat);
	__mod_memcg_lruvec_state(lruvec, idx, nr);
	rcu_read_unlock();
}

static __always_inline
struct mem_cgroup *mem_cgroup_from_obj_folio(struct folio *folio, void *p)
{
	/*
	 * Slab objects are accounted individually, not per-page.
	 * Memcg membership data for each individual object is saved in
	 * slab->obj_exts.
	 */
	if (folio_test_slab(folio)) {
		struct slabobj_ext *obj_exts;
		struct slab *slab;
		unsigned int off;

		slab = folio_slab(folio);
		obj_exts = slab_obj_exts(slab);
		if (!obj_exts)
			return NULL;

		off = obj_to_index(slab->slab_cache, slab, p);
		if (obj_exts[off].objcg)
			return obj_cgroup_memcg(obj_exts[off].objcg);

		return NULL;
	}

	/*
	 * folio_memcg_check() is used here, because in theory we can encounter
	 * a folio where the slab flag has been cleared already, but
	 * slab->obj_exts has not been freed yet.
	 * folio_memcg_check() will guarantee that a proper memory
	 * cgroup pointer or NULL will be returned.
	 */
	return folio_memcg_check(folio);
}

/*
 * Returns a pointer to the memory cgroup to which the kernel object is charged.
 * It is not suitable for objects allocated using vmalloc().
 *
 * A passed kernel object must be a slab object or a generic kernel page.
 *
 * The caller must ensure the memcg lifetime, e.g. by taking rcu_read_lock(),
 * cgroup_mutex, etc.
 */
struct mem_cgroup *mem_cgroup_from_slab_obj(void *p)
{
	if (mem_cgroup_disabled())
		return NULL;

	return mem_cgroup_from_obj_folio(virt_to_folio(p), p);
}

static struct obj_cgroup *__get_obj_cgroup_from_memcg(struct mem_cgroup *memcg)
{
	struct obj_cgroup *objcg = NULL;

	for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg)) {
		objcg = rcu_dereference(memcg->objcg);
		if (likely(objcg && obj_cgroup_tryget(objcg)))
			break;
		objcg = NULL;
	}
	return objcg;
}

static struct obj_cgroup *current_objcg_update(void)
{
	struct mem_cgroup *memcg;
	struct obj_cgroup *old, *objcg = NULL;

	do {
		/* Atomically drop the update bit. */
		old = xchg(&current->objcg, NULL);
		if (old) {
			old = (struct obj_cgroup *)
				((unsigned long)old & ~CURRENT_OBJCG_UPDATE_FLAG);
			obj_cgroup_put(old);

			old = NULL;
		}

		/* If new objcg is NULL, no reason for the second atomic update.
		 */
		if (!current->mm || (current->flags & PF_KTHREAD))
			return NULL;

		/*
		 * Release the objcg pointer from the previous iteration,
		 * if try_cmpxchg() below fails.
		 */
		if (unlikely(objcg)) {
			obj_cgroup_put(objcg);
			objcg = NULL;
		}

		/*
		 * Obtain the new objcg pointer. The current task can be
		 * asynchronously moved to another memcg and the previous
		 * memcg can be offlined. So let's get the memcg pointer
		 * and try to get a reference to objcg under an RCU read lock.
		 */

		rcu_read_lock();
		memcg = mem_cgroup_from_task(current);
		objcg = __get_obj_cgroup_from_memcg(memcg);
		rcu_read_unlock();

		/*
		 * Try to set up a new objcg pointer atomically. If it
		 * fails, it means the update flag was set concurrently, so
		 * the whole procedure should be repeated.
		 */
	} while (!try_cmpxchg(&current->objcg, &old, objcg));

	return objcg;
}

__always_inline struct obj_cgroup *current_obj_cgroup(void)
{
	struct mem_cgroup *memcg;
	struct obj_cgroup *objcg;

	if (in_task()) {
		memcg = current->active_memcg;
		if (unlikely(memcg))
			goto from_memcg;

		objcg = READ_ONCE(current->objcg);
		if (unlikely((unsigned long)objcg & CURRENT_OBJCG_UPDATE_FLAG))
			objcg = current_objcg_update();
		/*
		 * Objcg reference is kept by the task, so it's safe
		 * for the current task to use the objcg.
		 */
		return objcg;
	}

	memcg = this_cpu_read(int_active_memcg);
	if (unlikely(memcg))
		goto from_memcg;

	return NULL;

from_memcg:
	objcg = NULL;
	for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg)) {
		/*
		 * Memcg pointer is protected by scope (see set_active_memcg())
		 * and is pinning the corresponding objcg, so objcg can't go
		 * away and can be used within the scope without any additional
		 * protection.
		 */
		objcg = rcu_dereference_check(memcg->objcg, 1);
		if (likely(objcg))
			break;
	}

	return objcg;
}

struct obj_cgroup *get_obj_cgroup_from_folio(struct folio *folio)
{
	struct obj_cgroup *objcg;

	if (!memcg_kmem_online())
		return NULL;

	if (folio_memcg_kmem(folio)) {
		objcg = __folio_objcg(folio);
		obj_cgroup_get(objcg);
	} else {
		struct mem_cgroup *memcg;

		rcu_read_lock();
		memcg = __folio_memcg(folio);
		if (memcg)
			objcg = __get_obj_cgroup_from_memcg(memcg);
		else
			objcg = NULL;
		rcu_read_unlock();
	}
	return objcg;
}

/*
 * obj_cgroup_uncharge_pages: uncharge a number of kernel pages from an objcg
 * @objcg: object cgroup to uncharge
 * @nr_pages: number of pages to uncharge
 */
static void obj_cgroup_uncharge_pages(struct obj_cgroup *objcg,
				      unsigned int nr_pages)
{
	struct mem_cgroup *memcg;

	memcg = get_mem_cgroup_from_objcg(objcg);

	mod_memcg_state(memcg, MEMCG_KMEM, -nr_pages);
	memcg1_account_kmem(memcg, -nr_pages);
	refill_stock(memcg, nr_pages);

	css_put(&memcg->css);
}

/*
 * obj_cgroup_charge_pages: charge a number of kernel pages to an objcg
 * @objcg: object cgroup to charge
 * @gfp: reclaim mode
 * @nr_pages: number of pages to charge
 *
 * Returns 0 on success, an error code on failure.
2627 */ 2628 static int obj_cgroup_charge_pages(struct obj_cgroup *objcg, gfp_t gfp, 2629 unsigned int nr_pages) 2630 { 2631 struct mem_cgroup *memcg; 2632 int ret; 2633 2634 memcg = get_mem_cgroup_from_objcg(objcg); 2635 2636 ret = try_charge_memcg(memcg, gfp, nr_pages); 2637 if (ret) 2638 goto out; 2639 2640 mod_memcg_state(memcg, MEMCG_KMEM, nr_pages); 2641 memcg1_account_kmem(memcg, nr_pages); 2642 out: 2643 css_put(&memcg->css); 2644 2645 return ret; 2646 } 2647 2648 /** 2649 * __memcg_kmem_charge_page: charge a kmem page to the current memory cgroup 2650 * @page: page to charge 2651 * @gfp: reclaim mode 2652 * @order: allocation order 2653 * 2654 * Returns 0 on success, an error code on failure. 2655 */ 2656 int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order) 2657 { 2658 struct obj_cgroup *objcg; 2659 int ret = 0; 2660 2661 objcg = current_obj_cgroup(); 2662 if (objcg) { 2663 ret = obj_cgroup_charge_pages(objcg, gfp, 1 << order); 2664 if (!ret) { 2665 obj_cgroup_get(objcg); 2666 page->memcg_data = (unsigned long)objcg | 2667 MEMCG_DATA_KMEM; 2668 return 0; 2669 } 2670 } 2671 return ret; 2672 } 2673 2674 /** 2675 * __memcg_kmem_uncharge_page: uncharge a kmem page 2676 * @page: page to uncharge 2677 * @order: allocation order 2678 */ 2679 void __memcg_kmem_uncharge_page(struct page *page, int order) 2680 { 2681 struct folio *folio = page_folio(page); 2682 struct obj_cgroup *objcg; 2683 unsigned int nr_pages = 1 << order; 2684 2685 if (!folio_memcg_kmem(folio)) 2686 return; 2687 2688 objcg = __folio_objcg(folio); 2689 obj_cgroup_uncharge_pages(objcg, nr_pages); 2690 folio->memcg_data = 0; 2691 obj_cgroup_put(objcg); 2692 } 2693 2694 static void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat, 2695 enum node_stat_item idx, int nr) 2696 { 2697 struct memcg_stock_pcp *stock; 2698 struct obj_cgroup *old = NULL; 2699 unsigned long flags; 2700 int *bytes; 2701 2702 local_lock_irqsave(&memcg_stock.stock_lock, flags); 2703 stock = this_cpu_ptr(&memcg_stock); 2704 2705 /* 2706 * Save vmstat data in stock and skip vmstat array update unless 2707 * accumulating over a page of vmstat data or when pgdat or idx 2708 * changes. 2709 */ 2710 if (READ_ONCE(stock->cached_objcg) != objcg) { 2711 old = drain_obj_stock(stock); 2712 obj_cgroup_get(objcg); 2713 stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes) 2714 ? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0; 2715 WRITE_ONCE(stock->cached_objcg, objcg); 2716 stock->cached_pgdat = pgdat; 2717 } else if (stock->cached_pgdat != pgdat) { 2718 /* Flush the existing cached vmstat data */ 2719 struct pglist_data *oldpg = stock->cached_pgdat; 2720 2721 if (stock->nr_slab_reclaimable_b) { 2722 __mod_objcg_mlstate(objcg, oldpg, NR_SLAB_RECLAIMABLE_B, 2723 stock->nr_slab_reclaimable_b); 2724 stock->nr_slab_reclaimable_b = 0; 2725 } 2726 if (stock->nr_slab_unreclaimable_b) { 2727 __mod_objcg_mlstate(objcg, oldpg, NR_SLAB_UNRECLAIMABLE_B, 2728 stock->nr_slab_unreclaimable_b); 2729 stock->nr_slab_unreclaimable_b = 0; 2730 } 2731 stock->cached_pgdat = pgdat; 2732 } 2733 2734 bytes = (idx == NR_SLAB_RECLAIMABLE_B) ? &stock->nr_slab_reclaimable_b 2735 : &stock->nr_slab_unreclaimable_b; 2736 /* 2737 * Even for large object >= PAGE_SIZE, the vmstat data will still be 2738 * cached locally at least once before pushing it out. 
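	 *
	 * For instance (numbers purely illustrative), a stream of +64 byte
	 * updates for the same objcg, pgdat and idx only accumulates in
	 * *bytes below; __mod_objcg_mlstate() is not called until the
	 * accumulated value exceeds PAGE_SIZE, or until the cached objcg or
	 * pgdat changes and the stock is flushed above.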
2739 */ 2740 if (!*bytes) { 2741 *bytes = nr; 2742 nr = 0; 2743 } else { 2744 *bytes += nr; 2745 if (abs(*bytes) > PAGE_SIZE) { 2746 nr = *bytes; 2747 *bytes = 0; 2748 } else { 2749 nr = 0; 2750 } 2751 } 2752 if (nr) 2753 __mod_objcg_mlstate(objcg, pgdat, idx, nr); 2754 2755 local_unlock_irqrestore(&memcg_stock.stock_lock, flags); 2756 obj_cgroup_put(old); 2757 } 2758 2759 static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes) 2760 { 2761 struct memcg_stock_pcp *stock; 2762 unsigned long flags; 2763 bool ret = false; 2764 2765 local_lock_irqsave(&memcg_stock.stock_lock, flags); 2766 2767 stock = this_cpu_ptr(&memcg_stock); 2768 if (objcg == READ_ONCE(stock->cached_objcg) && stock->nr_bytes >= nr_bytes) { 2769 stock->nr_bytes -= nr_bytes; 2770 ret = true; 2771 } 2772 2773 local_unlock_irqrestore(&memcg_stock.stock_lock, flags); 2774 2775 return ret; 2776 } 2777 2778 static struct obj_cgroup *drain_obj_stock(struct memcg_stock_pcp *stock) 2779 { 2780 struct obj_cgroup *old = READ_ONCE(stock->cached_objcg); 2781 2782 if (!old) 2783 return NULL; 2784 2785 if (stock->nr_bytes) { 2786 unsigned int nr_pages = stock->nr_bytes >> PAGE_SHIFT; 2787 unsigned int nr_bytes = stock->nr_bytes & (PAGE_SIZE - 1); 2788 2789 if (nr_pages) { 2790 struct mem_cgroup *memcg; 2791 2792 memcg = get_mem_cgroup_from_objcg(old); 2793 2794 mod_memcg_state(memcg, MEMCG_KMEM, -nr_pages); 2795 memcg1_account_kmem(memcg, -nr_pages); 2796 __refill_stock(memcg, nr_pages); 2797 2798 css_put(&memcg->css); 2799 } 2800 2801 /* 2802 * The leftover is flushed to the centralized per-memcg value. 2803 * On the next attempt to refill obj stock it will be moved 2804 * to a per-cpu stock (probably, on an other CPU), see 2805 * refill_obj_stock(). 2806 * 2807 * How often it's flushed is a trade-off between the memory 2808 * limit enforcement accuracy and potential CPU contention, 2809 * so it might be changed in the future. 2810 */ 2811 atomic_add(nr_bytes, &old->nr_charged_bytes); 2812 stock->nr_bytes = 0; 2813 } 2814 2815 /* 2816 * Flush the vmstat data in current stock 2817 */ 2818 if (stock->nr_slab_reclaimable_b || stock->nr_slab_unreclaimable_b) { 2819 if (stock->nr_slab_reclaimable_b) { 2820 __mod_objcg_mlstate(old, stock->cached_pgdat, 2821 NR_SLAB_RECLAIMABLE_B, 2822 stock->nr_slab_reclaimable_b); 2823 stock->nr_slab_reclaimable_b = 0; 2824 } 2825 if (stock->nr_slab_unreclaimable_b) { 2826 __mod_objcg_mlstate(old, stock->cached_pgdat, 2827 NR_SLAB_UNRECLAIMABLE_B, 2828 stock->nr_slab_unreclaimable_b); 2829 stock->nr_slab_unreclaimable_b = 0; 2830 } 2831 stock->cached_pgdat = NULL; 2832 } 2833 2834 WRITE_ONCE(stock->cached_objcg, NULL); 2835 /* 2836 * The `old' objects needs to be released by the caller via 2837 * obj_cgroup_put() outside of memcg_stock_pcp::stock_lock. 
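	 *
	 * A typical caller (see e.g. drain_local_stock() above) therefore
	 * follows roughly this pattern:
	 *
	 *	local_lock_irqsave(&memcg_stock.stock_lock, flags);
	 *	old = drain_obj_stock(stock);
	 *	...
	 *	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
	 *	obj_cgroup_put(old);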
2838 */ 2839 return old; 2840 } 2841 2842 static bool obj_stock_flush_required(struct memcg_stock_pcp *stock, 2843 struct mem_cgroup *root_memcg) 2844 { 2845 struct obj_cgroup *objcg = READ_ONCE(stock->cached_objcg); 2846 struct mem_cgroup *memcg; 2847 2848 if (objcg) { 2849 memcg = obj_cgroup_memcg(objcg); 2850 if (memcg && mem_cgroup_is_descendant(memcg, root_memcg)) 2851 return true; 2852 } 2853 2854 return false; 2855 } 2856 2857 static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes, 2858 bool allow_uncharge) 2859 { 2860 struct memcg_stock_pcp *stock; 2861 struct obj_cgroup *old = NULL; 2862 unsigned long flags; 2863 unsigned int nr_pages = 0; 2864 2865 local_lock_irqsave(&memcg_stock.stock_lock, flags); 2866 2867 stock = this_cpu_ptr(&memcg_stock); 2868 if (READ_ONCE(stock->cached_objcg) != objcg) { /* reset if necessary */ 2869 old = drain_obj_stock(stock); 2870 obj_cgroup_get(objcg); 2871 WRITE_ONCE(stock->cached_objcg, objcg); 2872 stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes) 2873 ? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0; 2874 allow_uncharge = true; /* Allow uncharge when objcg changes */ 2875 } 2876 stock->nr_bytes += nr_bytes; 2877 2878 if (allow_uncharge && (stock->nr_bytes > PAGE_SIZE)) { 2879 nr_pages = stock->nr_bytes >> PAGE_SHIFT; 2880 stock->nr_bytes &= (PAGE_SIZE - 1); 2881 } 2882 2883 local_unlock_irqrestore(&memcg_stock.stock_lock, flags); 2884 obj_cgroup_put(old); 2885 2886 if (nr_pages) 2887 obj_cgroup_uncharge_pages(objcg, nr_pages); 2888 } 2889 2890 int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size) 2891 { 2892 unsigned int nr_pages, nr_bytes; 2893 int ret; 2894 2895 if (consume_obj_stock(objcg, size)) 2896 return 0; 2897 2898 /* 2899 * In theory, objcg->nr_charged_bytes can have enough 2900 * pre-charged bytes to satisfy the allocation. However, 2901 * flushing objcg->nr_charged_bytes requires two atomic 2902 * operations, and objcg->nr_charged_bytes can't be big. 2903 * The shared objcg->nr_charged_bytes can also become a 2904 * performance bottleneck if all tasks of the same memcg are 2905 * trying to update it. So it's better to ignore it and try 2906 * grab some new pages. The stock's nr_bytes will be flushed to 2907 * objcg->nr_charged_bytes later on when objcg changes. 2908 * 2909 * The stock's nr_bytes may contain enough pre-charged bytes 2910 * to allow one less page from being charged, but we can't rely 2911 * on the pre-charged bytes not being changed outside of 2912 * consume_obj_stock() or refill_obj_stock(). So ignore those 2913 * pre-charged bytes as well when charging pages. To avoid a 2914 * page uncharge right after a page charge, we set the 2915 * allow_uncharge flag to false when calling refill_obj_stock() 2916 * to temporarily allow the pre-charged bytes to exceed the page 2917 * size limit. The maximum reachable value of the pre-charged 2918 * bytes is (sizeof(object) + PAGE_SIZE - 2) if there is no data 2919 * race. 
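	 *
	 * As a worked example (sizes purely illustrative): charging a
	 * 700 byte object on a system with 4K pages computes nr_pages = 1,
	 * charges one full page via obj_cgroup_charge_pages() below, and
	 * then stocks the remaining PAGE_SIZE - 700 bytes with
	 * refill_obj_stock(), so that subsequent small charges can be
	 * served from the per-cpu stock without touching the page counters.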
2920 */ 2921 nr_pages = size >> PAGE_SHIFT; 2922 nr_bytes = size & (PAGE_SIZE - 1); 2923 2924 if (nr_bytes) 2925 nr_pages += 1; 2926 2927 ret = obj_cgroup_charge_pages(objcg, gfp, nr_pages); 2928 if (!ret && nr_bytes) 2929 refill_obj_stock(objcg, PAGE_SIZE - nr_bytes, false); 2930 2931 return ret; 2932 } 2933 2934 void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size) 2935 { 2936 refill_obj_stock(objcg, size, true); 2937 } 2938 2939 static inline size_t obj_full_size(struct kmem_cache *s) 2940 { 2941 /* 2942 * For each accounted object there is an extra space which is used 2943 * to store obj_cgroup membership. Charge it too. 2944 */ 2945 return s->size + sizeof(struct obj_cgroup *); 2946 } 2947 2948 bool __memcg_slab_post_alloc_hook(struct kmem_cache *s, struct list_lru *lru, 2949 gfp_t flags, size_t size, void **p) 2950 { 2951 struct obj_cgroup *objcg; 2952 struct slab *slab; 2953 unsigned long off; 2954 size_t i; 2955 2956 /* 2957 * The obtained objcg pointer is safe to use within the current scope, 2958 * defined by current task or set_active_memcg() pair. 2959 * obj_cgroup_get() is used to get a permanent reference. 2960 */ 2961 objcg = current_obj_cgroup(); 2962 if (!objcg) 2963 return true; 2964 2965 /* 2966 * slab_alloc_node() avoids the NULL check, so we might be called with a 2967 * single NULL object. kmem_cache_alloc_bulk() aborts if it can't fill 2968 * the whole requested size. 2969 * return success as there's nothing to free back 2970 */ 2971 if (unlikely(*p == NULL)) 2972 return true; 2973 2974 flags &= gfp_allowed_mask; 2975 2976 if (lru) { 2977 int ret; 2978 struct mem_cgroup *memcg; 2979 2980 memcg = get_mem_cgroup_from_objcg(objcg); 2981 ret = memcg_list_lru_alloc(memcg, lru, flags); 2982 css_put(&memcg->css); 2983 2984 if (ret) 2985 return false; 2986 } 2987 2988 if (obj_cgroup_charge(objcg, flags, size * obj_full_size(s))) 2989 return false; 2990 2991 for (i = 0; i < size; i++) { 2992 slab = virt_to_slab(p[i]); 2993 2994 if (!slab_obj_exts(slab) && 2995 alloc_slab_obj_exts(slab, s, flags, false)) { 2996 obj_cgroup_uncharge(objcg, obj_full_size(s)); 2997 continue; 2998 } 2999 3000 off = obj_to_index(s, slab, p[i]); 3001 obj_cgroup_get(objcg); 3002 slab_obj_exts(slab)[off].objcg = objcg; 3003 mod_objcg_state(objcg, slab_pgdat(slab), 3004 cache_vmstat_idx(s), obj_full_size(s)); 3005 } 3006 3007 return true; 3008 } 3009 3010 void __memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab, 3011 void **p, int objects, struct slabobj_ext *obj_exts) 3012 { 3013 for (int i = 0; i < objects; i++) { 3014 struct obj_cgroup *objcg; 3015 unsigned int off; 3016 3017 off = obj_to_index(s, slab, p[i]); 3018 objcg = obj_exts[off].objcg; 3019 if (!objcg) 3020 continue; 3021 3022 obj_exts[off].objcg = NULL; 3023 obj_cgroup_uncharge(objcg, obj_full_size(s)); 3024 mod_objcg_state(objcg, slab_pgdat(slab), cache_vmstat_idx(s), 3025 -obj_full_size(s)); 3026 obj_cgroup_put(objcg); 3027 } 3028 } 3029 3030 /* 3031 * Because folio_memcg(head) is not set on tails, set it now. 
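 *
 * For example, splitting an order-9 folio into order-0 pages (old_nr = 512,
 * new_nr = 1) copies folio->memcg_data to all 511 tail pages and takes 511
 * extra references on the objcg (kmem) or memcg css, so that each resulting
 * page holds its own reference.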
3032 */ 3033 void split_page_memcg(struct page *head, int old_order, int new_order) 3034 { 3035 struct folio *folio = page_folio(head); 3036 int i; 3037 unsigned int old_nr = 1 << old_order; 3038 unsigned int new_nr = 1 << new_order; 3039 3040 if (mem_cgroup_disabled() || !folio_memcg_charged(folio)) 3041 return; 3042 3043 for (i = new_nr; i < old_nr; i += new_nr) 3044 folio_page(folio, i)->memcg_data = folio->memcg_data; 3045 3046 if (folio_memcg_kmem(folio)) 3047 obj_cgroup_get_many(__folio_objcg(folio), old_nr / new_nr - 1); 3048 else 3049 css_get_many(&folio_memcg(folio)->css, old_nr / new_nr - 1); 3050 } 3051 3052 unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap) 3053 { 3054 unsigned long val; 3055 3056 if (mem_cgroup_is_root(memcg)) { 3057 /* 3058 * Approximate root's usage from global state. This isn't 3059 * perfect, but the root usage was always an approximation. 3060 */ 3061 val = global_node_page_state(NR_FILE_PAGES) + 3062 global_node_page_state(NR_ANON_MAPPED); 3063 if (swap) 3064 val += total_swap_pages - get_nr_swap_pages(); 3065 } else { 3066 if (!swap) 3067 val = page_counter_read(&memcg->memory); 3068 else 3069 val = page_counter_read(&memcg->memsw); 3070 } 3071 return val; 3072 } 3073 3074 static int memcg_online_kmem(struct mem_cgroup *memcg) 3075 { 3076 struct obj_cgroup *objcg; 3077 3078 if (mem_cgroup_kmem_disabled()) 3079 return 0; 3080 3081 if (unlikely(mem_cgroup_is_root(memcg))) 3082 return 0; 3083 3084 objcg = obj_cgroup_alloc(); 3085 if (!objcg) 3086 return -ENOMEM; 3087 3088 objcg->memcg = memcg; 3089 rcu_assign_pointer(memcg->objcg, objcg); 3090 obj_cgroup_get(objcg); 3091 memcg->orig_objcg = objcg; 3092 3093 static_branch_enable(&memcg_kmem_online_key); 3094 3095 memcg->kmemcg_id = memcg->id.id; 3096 3097 return 0; 3098 } 3099 3100 static void memcg_offline_kmem(struct mem_cgroup *memcg) 3101 { 3102 struct mem_cgroup *parent; 3103 3104 if (mem_cgroup_kmem_disabled()) 3105 return; 3106 3107 if (unlikely(mem_cgroup_is_root(memcg))) 3108 return; 3109 3110 parent = parent_mem_cgroup(memcg); 3111 if (!parent) 3112 parent = root_mem_cgroup; 3113 3114 memcg_reparent_list_lrus(memcg, parent); 3115 3116 /* 3117 * Objcg's reparenting must be after list_lru's, make sure list_lru 3118 * helpers won't use parent's list_lru until child is drained. 
3119 */ 3120 memcg_reparent_objcgs(memcg, parent); 3121 } 3122 3123 #ifdef CONFIG_CGROUP_WRITEBACK 3124 3125 #include <trace/events/writeback.h> 3126 3127 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp) 3128 { 3129 return wb_domain_init(&memcg->cgwb_domain, gfp); 3130 } 3131 3132 static void memcg_wb_domain_exit(struct mem_cgroup *memcg) 3133 { 3134 wb_domain_exit(&memcg->cgwb_domain); 3135 } 3136 3137 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg) 3138 { 3139 wb_domain_size_changed(&memcg->cgwb_domain); 3140 } 3141 3142 struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb) 3143 { 3144 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css); 3145 3146 if (!memcg->css.parent) 3147 return NULL; 3148 3149 return &memcg->cgwb_domain; 3150 } 3151 3152 /** 3153 * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg 3154 * @wb: bdi_writeback in question 3155 * @pfilepages: out parameter for number of file pages 3156 * @pheadroom: out parameter for number of allocatable pages according to memcg 3157 * @pdirty: out parameter for number of dirty pages 3158 * @pwriteback: out parameter for number of pages under writeback 3159 * 3160 * Determine the numbers of file, headroom, dirty, and writeback pages in 3161 * @wb's memcg. File, dirty and writeback are self-explanatory. Headroom 3162 * is a bit more involved. 3163 * 3164 * A memcg's headroom is "min(max, high) - used". In the hierarchy, the 3165 * headroom is calculated as the lowest headroom of itself and the 3166 * ancestors. Note that this doesn't consider the actual amount of 3167 * available memory in the system. The caller should further cap 3168 * *@pheadroom accordingly. 3169 */ 3170 void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages, 3171 unsigned long *pheadroom, unsigned long *pdirty, 3172 unsigned long *pwriteback) 3173 { 3174 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css); 3175 struct mem_cgroup *parent; 3176 3177 mem_cgroup_flush_stats_ratelimited(memcg); 3178 3179 *pdirty = memcg_page_state(memcg, NR_FILE_DIRTY); 3180 *pwriteback = memcg_page_state(memcg, NR_WRITEBACK); 3181 *pfilepages = memcg_page_state(memcg, NR_INACTIVE_FILE) + 3182 memcg_page_state(memcg, NR_ACTIVE_FILE); 3183 3184 *pheadroom = PAGE_COUNTER_MAX; 3185 while ((parent = parent_mem_cgroup(memcg))) { 3186 unsigned long ceiling = min(READ_ONCE(memcg->memory.max), 3187 READ_ONCE(memcg->memory.high)); 3188 unsigned long used = page_counter_read(&memcg->memory); 3189 3190 *pheadroom = min(*pheadroom, ceiling - min(ceiling, used)); 3191 memcg = parent; 3192 } 3193 } 3194 3195 /* 3196 * Foreign dirty flushing 3197 * 3198 * There's an inherent mismatch between memcg and writeback. The former 3199 * tracks ownership per-page while the latter per-inode. This was a 3200 * deliberate design decision because honoring per-page ownership in the 3201 * writeback path is complicated, may lead to higher CPU and IO overheads 3202 * and deemed unnecessary given that write-sharing an inode across 3203 * different cgroups isn't a common use-case. 3204 * 3205 * Combined with inode majority-writer ownership switching, this works well 3206 * enough in most cases but there are some pathological cases. For 3207 * example, let's say there are two cgroups A and B which keep writing to 3208 * different but confined parts of the same inode. B owns the inode and 3209 * A's memory is limited far below B's. 
A's dirty ratio can rise enough to 3210 * trigger balance_dirty_pages() sleeps but B's can be low enough to avoid 3211 * triggering background writeback. A will be slowed down without a way to 3212 * make writeback of the dirty pages happen. 3213 * 3214 * Conditions like the above can lead to a cgroup getting repeatedly and 3215 * severely throttled after making some progress after each 3216 * dirty_expire_interval while the underlying IO device is almost 3217 * completely idle. 3218 * 3219 * Solving this problem completely requires matching the ownership tracking 3220 * granularities between memcg and writeback in either direction. However, 3221 * the more egregious behaviors can be avoided by simply remembering the 3222 * most recent foreign dirtying events and initiating remote flushes on 3223 * them when local writeback isn't enough to keep the memory clean enough. 3224 * 3225 * The following two functions implement such mechanism. When a foreign 3226 * page - a page whose memcg and writeback ownerships don't match - is 3227 * dirtied, mem_cgroup_track_foreign_dirty() records the inode owning 3228 * bdi_writeback on the page owning memcg. When balance_dirty_pages() 3229 * decides that the memcg needs to sleep due to high dirty ratio, it calls 3230 * mem_cgroup_flush_foreign() which queues writeback on the recorded 3231 * foreign bdi_writebacks which haven't expired. Both the numbers of 3232 * recorded bdi_writebacks and concurrent in-flight foreign writebacks are 3233 * limited to MEMCG_CGWB_FRN_CNT. 3234 * 3235 * The mechanism only remembers IDs and doesn't hold any object references. 3236 * As being wrong occasionally doesn't matter, updates and accesses to the 3237 * records are lockless and racy. 3238 */ 3239 void mem_cgroup_track_foreign_dirty_slowpath(struct folio *folio, 3240 struct bdi_writeback *wb) 3241 { 3242 struct mem_cgroup *memcg = folio_memcg(folio); 3243 struct memcg_cgwb_frn *frn; 3244 u64 now = get_jiffies_64(); 3245 u64 oldest_at = now; 3246 int oldest = -1; 3247 int i; 3248 3249 trace_track_foreign_dirty(folio, wb); 3250 3251 /* 3252 * Pick the slot to use. If there is already a slot for @wb, keep 3253 * using it. If not replace the oldest one which isn't being 3254 * written out. 3255 */ 3256 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) { 3257 frn = &memcg->cgwb_frn[i]; 3258 if (frn->bdi_id == wb->bdi->id && 3259 frn->memcg_id == wb->memcg_css->id) 3260 break; 3261 if (time_before64(frn->at, oldest_at) && 3262 atomic_read(&frn->done.cnt) == 1) { 3263 oldest = i; 3264 oldest_at = frn->at; 3265 } 3266 } 3267 3268 if (i < MEMCG_CGWB_FRN_CNT) { 3269 /* 3270 * Re-using an existing one. Update timestamp lazily to 3271 * avoid making the cacheline hot. We want them to be 3272 * reasonably up-to-date and significantly shorter than 3273 * dirty_expire_interval as that's what expires the record. 3274 * Use the shorter of 1s and dirty_expire_interval / 8. 
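		 *
		 * With the default dirty_expire_interval of 30 seconds
		 * (vm.dirty_expire_centisecs = 3000, assuming an unchanged
		 * default), this works out to min(1s, 3.75s) = 1s between
		 * timestamp refreshes.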
3275 */ 3276 unsigned long update_intv = 3277 min_t(unsigned long, HZ, 3278 msecs_to_jiffies(dirty_expire_interval * 10) / 8); 3279 3280 if (time_before64(frn->at, now - update_intv)) 3281 frn->at = now; 3282 } else if (oldest >= 0) { 3283 /* replace the oldest free one */ 3284 frn = &memcg->cgwb_frn[oldest]; 3285 frn->bdi_id = wb->bdi->id; 3286 frn->memcg_id = wb->memcg_css->id; 3287 frn->at = now; 3288 } 3289 } 3290 3291 /* issue foreign writeback flushes for recorded foreign dirtying events */ 3292 void mem_cgroup_flush_foreign(struct bdi_writeback *wb) 3293 { 3294 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css); 3295 unsigned long intv = msecs_to_jiffies(dirty_expire_interval * 10); 3296 u64 now = jiffies_64; 3297 int i; 3298 3299 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) { 3300 struct memcg_cgwb_frn *frn = &memcg->cgwb_frn[i]; 3301 3302 /* 3303 * If the record is older than dirty_expire_interval, 3304 * writeback on it has already started. No need to kick it 3305 * off again. Also, don't start a new one if there's 3306 * already one in flight. 3307 */ 3308 if (time_after64(frn->at, now - intv) && 3309 atomic_read(&frn->done.cnt) == 1) { 3310 frn->at = 0; 3311 trace_flush_foreign(wb, frn->bdi_id, frn->memcg_id); 3312 cgroup_writeback_by_id(frn->bdi_id, frn->memcg_id, 3313 WB_REASON_FOREIGN_FLUSH, 3314 &frn->done); 3315 } 3316 } 3317 } 3318 3319 #else /* CONFIG_CGROUP_WRITEBACK */ 3320 3321 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp) 3322 { 3323 return 0; 3324 } 3325 3326 static void memcg_wb_domain_exit(struct mem_cgroup *memcg) 3327 { 3328 } 3329 3330 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg) 3331 { 3332 } 3333 3334 #endif /* CONFIG_CGROUP_WRITEBACK */ 3335 3336 /* 3337 * Private memory cgroup IDR 3338 * 3339 * Swap-out records and page cache shadow entries need to store memcg 3340 * references in constrained space, so we maintain an ID space that is 3341 * limited to 16 bit (MEM_CGROUP_ID_MAX), limiting the total number of 3342 * memory-controlled cgroups to 64k. 3343 * 3344 * However, there usually are many references to the offline CSS after 3345 * the cgroup has been destroyed, such as page cache or reclaimable 3346 * slab objects, that don't need to hang on to the ID. We want to keep 3347 * those dead CSS from occupying IDs, or we might quickly exhaust the 3348 * relatively small ID space and prevent the creation of new cgroups 3349 * even when there are much fewer than 64k cgroups - possibly none. 3350 * 3351 * Maintain a private 16-bit ID space for memcg, and allow the ID to 3352 * be freed and recycled when it's no longer needed, which is usually 3353 * when the CSS is offlined. 3354 * 3355 * The only exception to that are records of swapped out tmpfs/shmem 3356 * pages that need to be attributed to live ancestors on swapin. But 3357 * those references are manageable from userspace. 
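 *
 * Consumers of these IDs look the memcg back up under RCU; a lookup
 * typically follows this rough sketch:
 *
 *	rcu_read_lock();
 *	memcg = mem_cgroup_from_id(id);
 *	if (memcg && !css_tryget_online(&memcg->css))
 *		memcg = NULL;
 *	rcu_read_unlock();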
3358 */ 3359 3360 #define MEM_CGROUP_ID_MAX ((1UL << MEM_CGROUP_ID_SHIFT) - 1) 3361 static DEFINE_XARRAY_ALLOC1(mem_cgroup_ids); 3362 3363 static void mem_cgroup_id_remove(struct mem_cgroup *memcg) 3364 { 3365 if (memcg->id.id > 0) { 3366 xa_erase(&mem_cgroup_ids, memcg->id.id); 3367 memcg->id.id = 0; 3368 } 3369 } 3370 3371 void __maybe_unused mem_cgroup_id_get_many(struct mem_cgroup *memcg, 3372 unsigned int n) 3373 { 3374 refcount_add(n, &memcg->id.ref); 3375 } 3376 3377 void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n) 3378 { 3379 if (refcount_sub_and_test(n, &memcg->id.ref)) { 3380 mem_cgroup_id_remove(memcg); 3381 3382 /* Memcg ID pins CSS */ 3383 css_put(&memcg->css); 3384 } 3385 } 3386 3387 static inline void mem_cgroup_id_put(struct mem_cgroup *memcg) 3388 { 3389 mem_cgroup_id_put_many(memcg, 1); 3390 } 3391 3392 /** 3393 * mem_cgroup_from_id - look up a memcg from a memcg id 3394 * @id: the memcg id to look up 3395 * 3396 * Caller must hold rcu_read_lock(). 3397 */ 3398 struct mem_cgroup *mem_cgroup_from_id(unsigned short id) 3399 { 3400 WARN_ON_ONCE(!rcu_read_lock_held()); 3401 return xa_load(&mem_cgroup_ids, id); 3402 } 3403 3404 #ifdef CONFIG_SHRINKER_DEBUG 3405 struct mem_cgroup *mem_cgroup_get_from_ino(unsigned long ino) 3406 { 3407 struct cgroup *cgrp; 3408 struct cgroup_subsys_state *css; 3409 struct mem_cgroup *memcg; 3410 3411 cgrp = cgroup_get_from_id(ino); 3412 if (IS_ERR(cgrp)) 3413 return ERR_CAST(cgrp); 3414 3415 css = cgroup_get_e_css(cgrp, &memory_cgrp_subsys); 3416 if (css) 3417 memcg = container_of(css, struct mem_cgroup, css); 3418 else 3419 memcg = ERR_PTR(-ENOENT); 3420 3421 cgroup_put(cgrp); 3422 3423 return memcg; 3424 } 3425 #endif 3426 3427 static bool alloc_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node) 3428 { 3429 struct mem_cgroup_per_node *pn; 3430 3431 pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, node); 3432 if (!pn) 3433 return false; 3434 3435 pn->lruvec_stats = kzalloc_node(sizeof(struct lruvec_stats), 3436 GFP_KERNEL_ACCOUNT, node); 3437 if (!pn->lruvec_stats) 3438 goto fail; 3439 3440 pn->lruvec_stats_percpu = alloc_percpu_gfp(struct lruvec_stats_percpu, 3441 GFP_KERNEL_ACCOUNT); 3442 if (!pn->lruvec_stats_percpu) 3443 goto fail; 3444 3445 lruvec_init(&pn->lruvec); 3446 pn->memcg = memcg; 3447 3448 memcg->nodeinfo[node] = pn; 3449 return true; 3450 fail: 3451 kfree(pn->lruvec_stats); 3452 kfree(pn); 3453 return false; 3454 } 3455 3456 static void free_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node) 3457 { 3458 struct mem_cgroup_per_node *pn = memcg->nodeinfo[node]; 3459 3460 if (!pn) 3461 return; 3462 3463 free_percpu(pn->lruvec_stats_percpu); 3464 kfree(pn->lruvec_stats); 3465 kfree(pn); 3466 } 3467 3468 static void __mem_cgroup_free(struct mem_cgroup *memcg) 3469 { 3470 int node; 3471 3472 obj_cgroup_put(memcg->orig_objcg); 3473 3474 for_each_node(node) 3475 free_mem_cgroup_per_node_info(memcg, node); 3476 memcg1_free_events(memcg); 3477 kfree(memcg->vmstats); 3478 free_percpu(memcg->vmstats_percpu); 3479 kfree(memcg); 3480 } 3481 3482 static void mem_cgroup_free(struct mem_cgroup *memcg) 3483 { 3484 lru_gen_exit_memcg(memcg); 3485 memcg_wb_domain_exit(memcg); 3486 __mem_cgroup_free(memcg); 3487 } 3488 3489 static struct mem_cgroup *mem_cgroup_alloc(struct mem_cgroup *parent) 3490 { 3491 struct memcg_vmstats_percpu *statc, *pstatc; 3492 struct mem_cgroup *memcg; 3493 int node, cpu; 3494 int __maybe_unused i; 3495 long error; 3496 3497 memcg = kzalloc(struct_size(memcg, nodeinfo, nr_node_ids), 
GFP_KERNEL); 3498 if (!memcg) 3499 return ERR_PTR(-ENOMEM); 3500 3501 error = xa_alloc(&mem_cgroup_ids, &memcg->id.id, NULL, 3502 XA_LIMIT(1, MEM_CGROUP_ID_MAX), GFP_KERNEL); 3503 if (error) 3504 goto fail; 3505 error = -ENOMEM; 3506 3507 memcg->vmstats = kzalloc(sizeof(struct memcg_vmstats), 3508 GFP_KERNEL_ACCOUNT); 3509 if (!memcg->vmstats) 3510 goto fail; 3511 3512 memcg->vmstats_percpu = alloc_percpu_gfp(struct memcg_vmstats_percpu, 3513 GFP_KERNEL_ACCOUNT); 3514 if (!memcg->vmstats_percpu) 3515 goto fail; 3516 3517 if (!memcg1_alloc_events(memcg)) 3518 goto fail; 3519 3520 for_each_possible_cpu(cpu) { 3521 if (parent) 3522 pstatc = per_cpu_ptr(parent->vmstats_percpu, cpu); 3523 statc = per_cpu_ptr(memcg->vmstats_percpu, cpu); 3524 statc->parent = parent ? pstatc : NULL; 3525 statc->vmstats = memcg->vmstats; 3526 } 3527 3528 for_each_node(node) 3529 if (!alloc_mem_cgroup_per_node_info(memcg, node)) 3530 goto fail; 3531 3532 if (memcg_wb_domain_init(memcg, GFP_KERNEL)) 3533 goto fail; 3534 3535 INIT_WORK(&memcg->high_work, high_work_func); 3536 vmpressure_init(&memcg->vmpressure); 3537 INIT_LIST_HEAD(&memcg->memory_peaks); 3538 INIT_LIST_HEAD(&memcg->swap_peaks); 3539 spin_lock_init(&memcg->peaks_lock); 3540 memcg->socket_pressure = jiffies; 3541 memcg1_memcg_init(memcg); 3542 memcg->kmemcg_id = -1; 3543 INIT_LIST_HEAD(&memcg->objcg_list); 3544 #ifdef CONFIG_CGROUP_WRITEBACK 3545 INIT_LIST_HEAD(&memcg->cgwb_list); 3546 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) 3547 memcg->cgwb_frn[i].done = 3548 __WB_COMPLETION_INIT(&memcg_cgwb_frn_waitq); 3549 #endif 3550 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 3551 spin_lock_init(&memcg->deferred_split_queue.split_queue_lock); 3552 INIT_LIST_HEAD(&memcg->deferred_split_queue.split_queue); 3553 memcg->deferred_split_queue.split_queue_len = 0; 3554 #endif 3555 lru_gen_init_memcg(memcg); 3556 return memcg; 3557 fail: 3558 mem_cgroup_id_remove(memcg); 3559 __mem_cgroup_free(memcg); 3560 return ERR_PTR(error); 3561 } 3562 3563 static struct cgroup_subsys_state * __ref 3564 mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css) 3565 { 3566 struct mem_cgroup *parent = mem_cgroup_from_css(parent_css); 3567 struct mem_cgroup *memcg, *old_memcg; 3568 3569 old_memcg = set_active_memcg(parent); 3570 memcg = mem_cgroup_alloc(parent); 3571 set_active_memcg(old_memcg); 3572 if (IS_ERR(memcg)) 3573 return ERR_CAST(memcg); 3574 3575 page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX); 3576 memcg1_soft_limit_reset(memcg); 3577 #ifdef CONFIG_ZSWAP 3578 memcg->zswap_max = PAGE_COUNTER_MAX; 3579 WRITE_ONCE(memcg->zswap_writeback, true); 3580 #endif 3581 page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX); 3582 if (parent) { 3583 WRITE_ONCE(memcg->swappiness, mem_cgroup_swappiness(parent)); 3584 3585 page_counter_init(&memcg->memory, &parent->memory, true); 3586 page_counter_init(&memcg->swap, &parent->swap, false); 3587 #ifdef CONFIG_MEMCG_V1 3588 WRITE_ONCE(memcg->oom_kill_disable, READ_ONCE(parent->oom_kill_disable)); 3589 page_counter_init(&memcg->kmem, &parent->kmem, false); 3590 page_counter_init(&memcg->tcpmem, &parent->tcpmem, false); 3591 #endif 3592 } else { 3593 init_memcg_stats(); 3594 init_memcg_events(); 3595 page_counter_init(&memcg->memory, NULL, true); 3596 page_counter_init(&memcg->swap, NULL, false); 3597 #ifdef CONFIG_MEMCG_V1 3598 page_counter_init(&memcg->kmem, NULL, false); 3599 page_counter_init(&memcg->tcpmem, NULL, false); 3600 #endif 3601 root_mem_cgroup = memcg; 3602 return &memcg->css; 3603 } 3604 3605 if 
(cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket) 3606 static_branch_inc(&memcg_sockets_enabled_key); 3607 3608 if (!cgroup_memory_nobpf) 3609 static_branch_inc(&memcg_bpf_enabled_key); 3610 3611 return &memcg->css; 3612 } 3613 3614 static int mem_cgroup_css_online(struct cgroup_subsys_state *css) 3615 { 3616 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 3617 3618 if (memcg_online_kmem(memcg)) 3619 goto remove_id; 3620 3621 /* 3622 * A memcg must be visible for expand_shrinker_info() 3623 * by the time the maps are allocated. So, we allocate maps 3624 * here, when for_each_mem_cgroup() can't skip it. 3625 */ 3626 if (alloc_shrinker_info(memcg)) 3627 goto offline_kmem; 3628 3629 if (unlikely(mem_cgroup_is_root(memcg)) && !mem_cgroup_disabled()) 3630 queue_delayed_work(system_unbound_wq, &stats_flush_dwork, 3631 FLUSH_TIME); 3632 lru_gen_online_memcg(memcg); 3633 3634 /* Online state pins memcg ID, memcg ID pins CSS */ 3635 refcount_set(&memcg->id.ref, 1); 3636 css_get(css); 3637 3638 /* 3639 * Ensure mem_cgroup_from_id() works once we're fully online. 3640 * 3641 * We could do this earlier and require callers to filter with 3642 * css_tryget_online(). But right now there are no users that 3643 * need earlier access, and the workingset code relies on the 3644 * cgroup tree linkage (mem_cgroup_get_nr_swap_pages()). So 3645 * publish it here at the end of onlining. This matches the 3646 * regular ID destruction during offlining. 3647 */ 3648 xa_store(&mem_cgroup_ids, memcg->id.id, memcg, GFP_KERNEL); 3649 3650 return 0; 3651 offline_kmem: 3652 memcg_offline_kmem(memcg); 3653 remove_id: 3654 mem_cgroup_id_remove(memcg); 3655 return -ENOMEM; 3656 } 3657 3658 static void mem_cgroup_css_offline(struct cgroup_subsys_state *css) 3659 { 3660 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 3661 3662 memcg1_css_offline(memcg); 3663 3664 page_counter_set_min(&memcg->memory, 0); 3665 page_counter_set_low(&memcg->memory, 0); 3666 3667 zswap_memcg_offline_cleanup(memcg); 3668 3669 memcg_offline_kmem(memcg); 3670 reparent_shrinker_deferred(memcg); 3671 wb_memcg_offline(memcg); 3672 lru_gen_offline_memcg(memcg); 3673 3674 drain_all_stock(memcg); 3675 3676 mem_cgroup_id_put(memcg); 3677 } 3678 3679 static void mem_cgroup_css_released(struct cgroup_subsys_state *css) 3680 { 3681 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 3682 3683 invalidate_reclaim_iterators(memcg); 3684 lru_gen_release_memcg(memcg); 3685 } 3686 3687 static void mem_cgroup_css_free(struct cgroup_subsys_state *css) 3688 { 3689 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 3690 int __maybe_unused i; 3691 3692 #ifdef CONFIG_CGROUP_WRITEBACK 3693 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) 3694 wb_wait_for_completion(&memcg->cgwb_frn[i].done); 3695 #endif 3696 if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket) 3697 static_branch_dec(&memcg_sockets_enabled_key); 3698 3699 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg1_tcpmem_active(memcg)) 3700 static_branch_dec(&memcg_sockets_enabled_key); 3701 3702 if (!cgroup_memory_nobpf) 3703 static_branch_dec(&memcg_bpf_enabled_key); 3704 3705 vmpressure_cleanup(&memcg->vmpressure); 3706 cancel_work_sync(&memcg->high_work); 3707 memcg1_remove_from_trees(memcg); 3708 free_shrinker_info(memcg); 3709 mem_cgroup_free(memcg); 3710 } 3711 3712 /** 3713 * mem_cgroup_css_reset - reset the states of a mem_cgroup 3714 * @css: the target css 3715 * 3716 * Reset the states of the mem_cgroup associated with @css. 
This is 3717 * invoked when the userland requests disabling on the default hierarchy 3718 * but the memcg is pinned through dependency. The memcg should stop 3719 * applying policies and should revert to the vanilla state as it may be 3720 * made visible again. 3721 * 3722 * The current implementation only resets the essential configurations. 3723 * This needs to be expanded to cover all the visible parts. 3724 */ 3725 static void mem_cgroup_css_reset(struct cgroup_subsys_state *css) 3726 { 3727 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 3728 3729 page_counter_set_max(&memcg->memory, PAGE_COUNTER_MAX); 3730 page_counter_set_max(&memcg->swap, PAGE_COUNTER_MAX); 3731 #ifdef CONFIG_MEMCG_V1 3732 page_counter_set_max(&memcg->kmem, PAGE_COUNTER_MAX); 3733 page_counter_set_max(&memcg->tcpmem, PAGE_COUNTER_MAX); 3734 #endif 3735 page_counter_set_min(&memcg->memory, 0); 3736 page_counter_set_low(&memcg->memory, 0); 3737 page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX); 3738 memcg1_soft_limit_reset(memcg); 3739 page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX); 3740 memcg_wb_domain_size_changed(memcg); 3741 } 3742 3743 struct aggregate_control { 3744 /* pointer to the aggregated (CPU and subtree aggregated) counters */ 3745 long *aggregate; 3746 /* pointer to the non-hierarchichal (CPU aggregated) counters */ 3747 long *local; 3748 /* pointer to the pending child counters during tree propagation */ 3749 long *pending; 3750 /* pointer to the parent's pending counters, could be NULL */ 3751 long *ppending; 3752 /* pointer to the percpu counters to be aggregated */ 3753 long *cstat; 3754 /* pointer to the percpu counters of the last aggregation*/ 3755 long *cstat_prev; 3756 /* size of the above counters */ 3757 int size; 3758 }; 3759 3760 static void mem_cgroup_stat_aggregate(struct aggregate_control *ac) 3761 { 3762 int i; 3763 long delta, delta_cpu, v; 3764 3765 for (i = 0; i < ac->size; i++) { 3766 /* 3767 * Collect the aggregated propagation counts of groups 3768 * below us. We're in a per-cpu loop here and this is 3769 * a global counter, so the first cycle will get them. 3770 */ 3771 delta = ac->pending[i]; 3772 if (delta) 3773 ac->pending[i] = 0; 3774 3775 /* Add CPU changes on this level since the last flush */ 3776 delta_cpu = 0; 3777 v = READ_ONCE(ac->cstat[i]); 3778 if (v != ac->cstat_prev[i]) { 3779 delta_cpu = v - ac->cstat_prev[i]; 3780 delta += delta_cpu; 3781 ac->cstat_prev[i] = v; 3782 } 3783 3784 /* Aggregate counts on this level and propagate upwards */ 3785 if (delta_cpu) 3786 ac->local[i] += delta_cpu; 3787 3788 if (delta) { 3789 ac->aggregate[i] += delta; 3790 if (ac->ppending) 3791 ac->ppending[i] += delta; 3792 } 3793 } 3794 } 3795 3796 static void mem_cgroup_css_rstat_flush(struct cgroup_subsys_state *css, int cpu) 3797 { 3798 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 3799 struct mem_cgroup *parent = parent_mem_cgroup(memcg); 3800 struct memcg_vmstats_percpu *statc; 3801 struct aggregate_control ac; 3802 int nid; 3803 3804 statc = per_cpu_ptr(memcg->vmstats_percpu, cpu); 3805 3806 ac = (struct aggregate_control) { 3807 .aggregate = memcg->vmstats->state, 3808 .local = memcg->vmstats->state_local, 3809 .pending = memcg->vmstats->state_pending, 3810 .ppending = parent ? 
parent->vmstats->state_pending : NULL, 3811 .cstat = statc->state, 3812 .cstat_prev = statc->state_prev, 3813 .size = MEMCG_VMSTAT_SIZE, 3814 }; 3815 mem_cgroup_stat_aggregate(&ac); 3816 3817 ac = (struct aggregate_control) { 3818 .aggregate = memcg->vmstats->events, 3819 .local = memcg->vmstats->events_local, 3820 .pending = memcg->vmstats->events_pending, 3821 .ppending = parent ? parent->vmstats->events_pending : NULL, 3822 .cstat = statc->events, 3823 .cstat_prev = statc->events_prev, 3824 .size = NR_MEMCG_EVENTS, 3825 }; 3826 mem_cgroup_stat_aggregate(&ac); 3827 3828 for_each_node_state(nid, N_MEMORY) { 3829 struct mem_cgroup_per_node *pn = memcg->nodeinfo[nid]; 3830 struct lruvec_stats *lstats = pn->lruvec_stats; 3831 struct lruvec_stats *plstats = NULL; 3832 struct lruvec_stats_percpu *lstatc; 3833 3834 if (parent) 3835 plstats = parent->nodeinfo[nid]->lruvec_stats; 3836 3837 lstatc = per_cpu_ptr(pn->lruvec_stats_percpu, cpu); 3838 3839 ac = (struct aggregate_control) { 3840 .aggregate = lstats->state, 3841 .local = lstats->state_local, 3842 .pending = lstats->state_pending, 3843 .ppending = plstats ? plstats->state_pending : NULL, 3844 .cstat = lstatc->state, 3845 .cstat_prev = lstatc->state_prev, 3846 .size = NR_MEMCG_NODE_STAT_ITEMS, 3847 }; 3848 mem_cgroup_stat_aggregate(&ac); 3849 3850 } 3851 WRITE_ONCE(statc->stats_updates, 0); 3852 /* We are in a per-cpu loop here, only do the atomic write once */ 3853 if (atomic64_read(&memcg->vmstats->stats_updates)) 3854 atomic64_set(&memcg->vmstats->stats_updates, 0); 3855 } 3856 3857 static void mem_cgroup_fork(struct task_struct *task) 3858 { 3859 /* 3860 * Set the update flag to cause task->objcg to be initialized lazily 3861 * on the first allocation. It can be done without any synchronization 3862 * because it's always performed on the current task, so does 3863 * current_objcg_update(). 3864 */ 3865 task->objcg = (struct obj_cgroup *)CURRENT_OBJCG_UPDATE_FLAG; 3866 } 3867 3868 static void mem_cgroup_exit(struct task_struct *task) 3869 { 3870 struct obj_cgroup *objcg = task->objcg; 3871 3872 objcg = (struct obj_cgroup *) 3873 ((unsigned long)objcg & ~CURRENT_OBJCG_UPDATE_FLAG); 3874 obj_cgroup_put(objcg); 3875 3876 /* 3877 * Some kernel allocations can happen after this point, 3878 * but let's ignore them. It can be done without any synchronization 3879 * because it's always performed on the current task, so does 3880 * current_objcg_update(). 
3881 */ 3882 task->objcg = NULL; 3883 } 3884 3885 #ifdef CONFIG_LRU_GEN 3886 static void mem_cgroup_lru_gen_attach(struct cgroup_taskset *tset) 3887 { 3888 struct task_struct *task; 3889 struct cgroup_subsys_state *css; 3890 3891 /* find the first leader if there is any */ 3892 cgroup_taskset_for_each_leader(task, css, tset) 3893 break; 3894 3895 if (!task) 3896 return; 3897 3898 task_lock(task); 3899 if (task->mm && READ_ONCE(task->mm->owner) == task) 3900 lru_gen_migrate_mm(task->mm); 3901 task_unlock(task); 3902 } 3903 #else 3904 static void mem_cgroup_lru_gen_attach(struct cgroup_taskset *tset) {} 3905 #endif /* CONFIG_LRU_GEN */ 3906 3907 static void mem_cgroup_kmem_attach(struct cgroup_taskset *tset) 3908 { 3909 struct task_struct *task; 3910 struct cgroup_subsys_state *css; 3911 3912 cgroup_taskset_for_each(task, css, tset) { 3913 /* atomically set the update bit */ 3914 set_bit(CURRENT_OBJCG_UPDATE_BIT, (unsigned long *)&task->objcg); 3915 } 3916 } 3917 3918 static void mem_cgroup_attach(struct cgroup_taskset *tset) 3919 { 3920 mem_cgroup_lru_gen_attach(tset); 3921 mem_cgroup_kmem_attach(tset); 3922 } 3923 3924 static int seq_puts_memcg_tunable(struct seq_file *m, unsigned long value) 3925 { 3926 if (value == PAGE_COUNTER_MAX) 3927 seq_puts(m, "max\n"); 3928 else 3929 seq_printf(m, "%llu\n", (u64)value * PAGE_SIZE); 3930 3931 return 0; 3932 } 3933 3934 static u64 memory_current_read(struct cgroup_subsys_state *css, 3935 struct cftype *cft) 3936 { 3937 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 3938 3939 return (u64)page_counter_read(&memcg->memory) * PAGE_SIZE; 3940 } 3941 3942 #define OFP_PEAK_UNSET (((-1UL))) 3943 3944 static int peak_show(struct seq_file *sf, void *v, struct page_counter *pc) 3945 { 3946 struct cgroup_of_peak *ofp = of_peak(sf->private); 3947 u64 fd_peak = READ_ONCE(ofp->value), peak; 3948 3949 /* User wants global or local peak? 
*/ 3950 if (fd_peak == OFP_PEAK_UNSET) 3951 peak = pc->watermark; 3952 else 3953 peak = max(fd_peak, READ_ONCE(pc->local_watermark)); 3954 3955 seq_printf(sf, "%llu\n", peak * PAGE_SIZE); 3956 return 0; 3957 } 3958 3959 static int memory_peak_show(struct seq_file *sf, void *v) 3960 { 3961 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(sf)); 3962 3963 return peak_show(sf, v, &memcg->memory); 3964 } 3965 3966 static int peak_open(struct kernfs_open_file *of) 3967 { 3968 struct cgroup_of_peak *ofp = of_peak(of); 3969 3970 ofp->value = OFP_PEAK_UNSET; 3971 return 0; 3972 } 3973 3974 static void peak_release(struct kernfs_open_file *of) 3975 { 3976 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 3977 struct cgroup_of_peak *ofp = of_peak(of); 3978 3979 if (ofp->value == OFP_PEAK_UNSET) { 3980 /* fast path (no writes on this fd) */ 3981 return; 3982 } 3983 spin_lock(&memcg->peaks_lock); 3984 list_del(&ofp->list); 3985 spin_unlock(&memcg->peaks_lock); 3986 } 3987 3988 static ssize_t peak_write(struct kernfs_open_file *of, char *buf, size_t nbytes, 3989 loff_t off, struct page_counter *pc, 3990 struct list_head *watchers) 3991 { 3992 unsigned long usage; 3993 struct cgroup_of_peak *peer_ctx; 3994 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 3995 struct cgroup_of_peak *ofp = of_peak(of); 3996 3997 spin_lock(&memcg->peaks_lock); 3998 3999 usage = page_counter_read(pc); 4000 WRITE_ONCE(pc->local_watermark, usage); 4001 4002 list_for_each_entry(peer_ctx, watchers, list) 4003 if (usage > peer_ctx->value) 4004 WRITE_ONCE(peer_ctx->value, usage); 4005 4006 /* initial write, register watcher */ 4007 if (ofp->value == -1) 4008 list_add(&ofp->list, watchers); 4009 4010 WRITE_ONCE(ofp->value, usage); 4011 spin_unlock(&memcg->peaks_lock); 4012 4013 return nbytes; 4014 } 4015 4016 static ssize_t memory_peak_write(struct kernfs_open_file *of, char *buf, 4017 size_t nbytes, loff_t off) 4018 { 4019 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 4020 4021 return peak_write(of, buf, nbytes, off, &memcg->memory, 4022 &memcg->memory_peaks); 4023 } 4024 4025 #undef OFP_PEAK_UNSET 4026 4027 static int memory_min_show(struct seq_file *m, void *v) 4028 { 4029 return seq_puts_memcg_tunable(m, 4030 READ_ONCE(mem_cgroup_from_seq(m)->memory.min)); 4031 } 4032 4033 static ssize_t memory_min_write(struct kernfs_open_file *of, 4034 char *buf, size_t nbytes, loff_t off) 4035 { 4036 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 4037 unsigned long min; 4038 int err; 4039 4040 buf = strstrip(buf); 4041 err = page_counter_memparse(buf, "max", &min); 4042 if (err) 4043 return err; 4044 4045 page_counter_set_min(&memcg->memory, min); 4046 4047 return nbytes; 4048 } 4049 4050 static int memory_low_show(struct seq_file *m, void *v) 4051 { 4052 return seq_puts_memcg_tunable(m, 4053 READ_ONCE(mem_cgroup_from_seq(m)->memory.low)); 4054 } 4055 4056 static ssize_t memory_low_write(struct kernfs_open_file *of, 4057 char *buf, size_t nbytes, loff_t off) 4058 { 4059 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 4060 unsigned long low; 4061 int err; 4062 4063 buf = strstrip(buf); 4064 err = page_counter_memparse(buf, "max", &low); 4065 if (err) 4066 return err; 4067 4068 page_counter_set_low(&memcg->memory, low); 4069 4070 return nbytes; 4071 } 4072 4073 static int memory_high_show(struct seq_file *m, void *v) 4074 { 4075 return seq_puts_memcg_tunable(m, 4076 READ_ONCE(mem_cgroup_from_seq(m)->memory.high)); 4077 } 4078 4079 static ssize_t memory_high_write(struct 
kernfs_open_file *of, 4080 char *buf, size_t nbytes, loff_t off) 4081 { 4082 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 4083 unsigned int nr_retries = MAX_RECLAIM_RETRIES; 4084 bool drained = false; 4085 unsigned long high; 4086 int err; 4087 4088 buf = strstrip(buf); 4089 err = page_counter_memparse(buf, "max", &high); 4090 if (err) 4091 return err; 4092 4093 page_counter_set_high(&memcg->memory, high); 4094 4095 for (;;) { 4096 unsigned long nr_pages = page_counter_read(&memcg->memory); 4097 unsigned long reclaimed; 4098 4099 if (nr_pages <= high) 4100 break; 4101 4102 if (signal_pending(current)) 4103 break; 4104 4105 if (!drained) { 4106 drain_all_stock(memcg); 4107 drained = true; 4108 continue; 4109 } 4110 4111 reclaimed = try_to_free_mem_cgroup_pages(memcg, nr_pages - high, 4112 GFP_KERNEL, MEMCG_RECLAIM_MAY_SWAP, NULL); 4113 4114 if (!reclaimed && !nr_retries--) 4115 break; 4116 } 4117 4118 memcg_wb_domain_size_changed(memcg); 4119 return nbytes; 4120 } 4121 4122 static int memory_max_show(struct seq_file *m, void *v) 4123 { 4124 return seq_puts_memcg_tunable(m, 4125 READ_ONCE(mem_cgroup_from_seq(m)->memory.max)); 4126 } 4127 4128 static ssize_t memory_max_write(struct kernfs_open_file *of, 4129 char *buf, size_t nbytes, loff_t off) 4130 { 4131 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 4132 unsigned int nr_reclaims = MAX_RECLAIM_RETRIES; 4133 bool drained = false; 4134 unsigned long max; 4135 int err; 4136 4137 buf = strstrip(buf); 4138 err = page_counter_memparse(buf, "max", &max); 4139 if (err) 4140 return err; 4141 4142 xchg(&memcg->memory.max, max); 4143 4144 for (;;) { 4145 unsigned long nr_pages = page_counter_read(&memcg->memory); 4146 4147 if (nr_pages <= max) 4148 break; 4149 4150 if (signal_pending(current)) 4151 break; 4152 4153 if (!drained) { 4154 drain_all_stock(memcg); 4155 drained = true; 4156 continue; 4157 } 4158 4159 if (nr_reclaims) { 4160 if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max, 4161 GFP_KERNEL, MEMCG_RECLAIM_MAY_SWAP, NULL)) 4162 nr_reclaims--; 4163 continue; 4164 } 4165 4166 memcg_memory_event(memcg, MEMCG_OOM); 4167 if (!mem_cgroup_out_of_memory(memcg, GFP_KERNEL, 0)) 4168 break; 4169 } 4170 4171 memcg_wb_domain_size_changed(memcg); 4172 return nbytes; 4173 } 4174 4175 /* 4176 * Note: don't forget to update the 'samples/cgroup/memcg_event_listener' 4177 * if any new events become available. 
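 * As of this writing, the events reported through memory.events and
 * memory.events.local are: low, high, max, oom, oom_kill and
 * oom_group_kill (see __memory_events_show() below).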
4178 */ 4179 static void __memory_events_show(struct seq_file *m, atomic_long_t *events) 4180 { 4181 seq_printf(m, "low %lu\n", atomic_long_read(&events[MEMCG_LOW])); 4182 seq_printf(m, "high %lu\n", atomic_long_read(&events[MEMCG_HIGH])); 4183 seq_printf(m, "max %lu\n", atomic_long_read(&events[MEMCG_MAX])); 4184 seq_printf(m, "oom %lu\n", atomic_long_read(&events[MEMCG_OOM])); 4185 seq_printf(m, "oom_kill %lu\n", 4186 atomic_long_read(&events[MEMCG_OOM_KILL])); 4187 seq_printf(m, "oom_group_kill %lu\n", 4188 atomic_long_read(&events[MEMCG_OOM_GROUP_KILL])); 4189 } 4190 4191 static int memory_events_show(struct seq_file *m, void *v) 4192 { 4193 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 4194 4195 __memory_events_show(m, memcg->memory_events); 4196 return 0; 4197 } 4198 4199 static int memory_events_local_show(struct seq_file *m, void *v) 4200 { 4201 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 4202 4203 __memory_events_show(m, memcg->memory_events_local); 4204 return 0; 4205 } 4206 4207 int memory_stat_show(struct seq_file *m, void *v) 4208 { 4209 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 4210 char *buf = kmalloc(SEQ_BUF_SIZE, GFP_KERNEL); 4211 struct seq_buf s; 4212 4213 if (!buf) 4214 return -ENOMEM; 4215 seq_buf_init(&s, buf, SEQ_BUF_SIZE); 4216 memory_stat_format(memcg, &s); 4217 seq_puts(m, buf); 4218 kfree(buf); 4219 return 0; 4220 } 4221 4222 #ifdef CONFIG_NUMA 4223 static inline unsigned long lruvec_page_state_output(struct lruvec *lruvec, 4224 int item) 4225 { 4226 return lruvec_page_state(lruvec, item) * 4227 memcg_page_state_output_unit(item); 4228 } 4229 4230 static int memory_numa_stat_show(struct seq_file *m, void *v) 4231 { 4232 int i; 4233 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 4234 4235 mem_cgroup_flush_stats(memcg); 4236 4237 for (i = 0; i < ARRAY_SIZE(memory_stats); i++) { 4238 int nid; 4239 4240 if (memory_stats[i].idx >= NR_VM_NODE_STAT_ITEMS) 4241 continue; 4242 4243 seq_printf(m, "%s", memory_stats[i].name); 4244 for_each_node_state(nid, N_MEMORY) { 4245 u64 size; 4246 struct lruvec *lruvec; 4247 4248 lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid)); 4249 size = lruvec_page_state_output(lruvec, 4250 memory_stats[i].idx); 4251 seq_printf(m, " N%d=%llu", nid, size); 4252 } 4253 seq_putc(m, '\n'); 4254 } 4255 4256 return 0; 4257 } 4258 #endif 4259 4260 static int memory_oom_group_show(struct seq_file *m, void *v) 4261 { 4262 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 4263 4264 seq_printf(m, "%d\n", READ_ONCE(memcg->oom_group)); 4265 4266 return 0; 4267 } 4268 4269 static ssize_t memory_oom_group_write(struct kernfs_open_file *of, 4270 char *buf, size_t nbytes, loff_t off) 4271 { 4272 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 4273 int ret, oom_group; 4274 4275 buf = strstrip(buf); 4276 if (!buf) 4277 return -EINVAL; 4278 4279 ret = kstrtoint(buf, 0, &oom_group); 4280 if (ret) 4281 return ret; 4282 4283 if (oom_group != 0 && oom_group != 1) 4284 return -EINVAL; 4285 4286 WRITE_ONCE(memcg->oom_group, oom_group); 4287 4288 return nbytes; 4289 } 4290 4291 enum { 4292 MEMORY_RECLAIM_SWAPPINESS = 0, 4293 MEMORY_RECLAIM_NULL, 4294 }; 4295 4296 static const match_table_t tokens = { 4297 { MEMORY_RECLAIM_SWAPPINESS, "swappiness=%d"}, 4298 { MEMORY_RECLAIM_NULL, NULL }, 4299 }; 4300 4301 static ssize_t memory_reclaim(struct kernfs_open_file *of, char *buf, 4302 size_t nbytes, loff_t off) 4303 { 4304 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 4305 unsigned int nr_retries = MAX_RECLAIM_RETRIES; 4306 
unsigned long nr_to_reclaim, nr_reclaimed = 0; 4307 int swappiness = -1; 4308 unsigned int reclaim_options; 4309 char *old_buf, *start; 4310 substring_t args[MAX_OPT_ARGS]; 4311 4312 buf = strstrip(buf); 4313 4314 old_buf = buf; 4315 nr_to_reclaim = memparse(buf, &buf) / PAGE_SIZE; 4316 if (buf == old_buf) 4317 return -EINVAL; 4318 4319 buf = strstrip(buf); 4320 4321 while ((start = strsep(&buf, " ")) != NULL) { 4322 if (!strlen(start)) 4323 continue; 4324 switch (match_token(start, tokens, args)) { 4325 case MEMORY_RECLAIM_SWAPPINESS: 4326 if (match_int(&args[0], &swappiness)) 4327 return -EINVAL; 4328 if (swappiness < MIN_SWAPPINESS || swappiness > MAX_SWAPPINESS) 4329 return -EINVAL; 4330 break; 4331 default: 4332 return -EINVAL; 4333 } 4334 } 4335 4336 reclaim_options = MEMCG_RECLAIM_MAY_SWAP | MEMCG_RECLAIM_PROACTIVE; 4337 while (nr_reclaimed < nr_to_reclaim) { 4338 /* Will converge on zero, but reclaim enforces a minimum */ 4339 unsigned long batch_size = (nr_to_reclaim - nr_reclaimed) / 4; 4340 unsigned long reclaimed; 4341 4342 if (signal_pending(current)) 4343 return -EINTR; 4344 4345 /* 4346 * This is the final attempt, drain percpu lru caches in the 4347 * hope of introducing more evictable pages for 4348 * try_to_free_mem_cgroup_pages(). 4349 */ 4350 if (!nr_retries) 4351 lru_add_drain_all(); 4352 4353 reclaimed = try_to_free_mem_cgroup_pages(memcg, 4354 batch_size, GFP_KERNEL, 4355 reclaim_options, 4356 swappiness == -1 ? NULL : &swappiness); 4357 4358 if (!reclaimed && !nr_retries--) 4359 return -EAGAIN; 4360 4361 nr_reclaimed += reclaimed; 4362 } 4363 4364 return nbytes; 4365 } 4366 4367 static struct cftype memory_files[] = { 4368 { 4369 .name = "current", 4370 .flags = CFTYPE_NOT_ON_ROOT, 4371 .read_u64 = memory_current_read, 4372 }, 4373 { 4374 .name = "peak", 4375 .flags = CFTYPE_NOT_ON_ROOT, 4376 .open = peak_open, 4377 .release = peak_release, 4378 .seq_show = memory_peak_show, 4379 .write = memory_peak_write, 4380 }, 4381 { 4382 .name = "min", 4383 .flags = CFTYPE_NOT_ON_ROOT, 4384 .seq_show = memory_min_show, 4385 .write = memory_min_write, 4386 }, 4387 { 4388 .name = "low", 4389 .flags = CFTYPE_NOT_ON_ROOT, 4390 .seq_show = memory_low_show, 4391 .write = memory_low_write, 4392 }, 4393 { 4394 .name = "high", 4395 .flags = CFTYPE_NOT_ON_ROOT, 4396 .seq_show = memory_high_show, 4397 .write = memory_high_write, 4398 }, 4399 { 4400 .name = "max", 4401 .flags = CFTYPE_NOT_ON_ROOT, 4402 .seq_show = memory_max_show, 4403 .write = memory_max_write, 4404 }, 4405 { 4406 .name = "events", 4407 .flags = CFTYPE_NOT_ON_ROOT, 4408 .file_offset = offsetof(struct mem_cgroup, events_file), 4409 .seq_show = memory_events_show, 4410 }, 4411 { 4412 .name = "events.local", 4413 .flags = CFTYPE_NOT_ON_ROOT, 4414 .file_offset = offsetof(struct mem_cgroup, events_local_file), 4415 .seq_show = memory_events_local_show, 4416 }, 4417 { 4418 .name = "stat", 4419 .seq_show = memory_stat_show, 4420 }, 4421 #ifdef CONFIG_NUMA 4422 { 4423 .name = "numa_stat", 4424 .seq_show = memory_numa_stat_show, 4425 }, 4426 #endif 4427 { 4428 .name = "oom.group", 4429 .flags = CFTYPE_NOT_ON_ROOT | CFTYPE_NS_DELEGATABLE, 4430 .seq_show = memory_oom_group_show, 4431 .write = memory_oom_group_write, 4432 }, 4433 { 4434 .name = "reclaim", 4435 .flags = CFTYPE_NS_DELEGATABLE, 4436 .write = memory_reclaim, 4437 }, 4438 { } /* terminate */ 4439 }; 4440 4441 struct cgroup_subsys memory_cgrp_subsys = { 4442 .css_alloc = mem_cgroup_css_alloc, 4443 .css_online = mem_cgroup_css_online, 4444 .css_offline = 
mem_cgroup_css_offline, 4445 .css_released = mem_cgroup_css_released, 4446 .css_free = mem_cgroup_css_free, 4447 .css_reset = mem_cgroup_css_reset, 4448 .css_rstat_flush = mem_cgroup_css_rstat_flush, 4449 .attach = mem_cgroup_attach, 4450 .fork = mem_cgroup_fork, 4451 .exit = mem_cgroup_exit, 4452 .dfl_cftypes = memory_files, 4453 #ifdef CONFIG_MEMCG_V1 4454 .legacy_cftypes = mem_cgroup_legacy_files, 4455 #endif 4456 .early_init = 0, 4457 }; 4458 4459 /** 4460 * mem_cgroup_calculate_protection - check if memory consumption is in the normal range 4461 * @root: the top ancestor of the sub-tree being checked 4462 * @memcg: the memory cgroup to check 4463 * 4464 * WARNING: This function is not stateless! It can only be used as part 4465 * of a top-down tree iteration, not for isolated queries. 4466 */ 4467 void mem_cgroup_calculate_protection(struct mem_cgroup *root, 4468 struct mem_cgroup *memcg) 4469 { 4470 bool recursive_protection = 4471 cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_RECURSIVE_PROT; 4472 4473 if (mem_cgroup_disabled()) 4474 return; 4475 4476 if (!root) 4477 root = root_mem_cgroup; 4478 4479 page_counter_calculate_protection(&root->memory, &memcg->memory, recursive_protection); 4480 } 4481 4482 static int charge_memcg(struct folio *folio, struct mem_cgroup *memcg, 4483 gfp_t gfp) 4484 { 4485 int ret; 4486 4487 ret = try_charge(memcg, gfp, folio_nr_pages(folio)); 4488 if (ret) 4489 goto out; 4490 4491 css_get(&memcg->css); 4492 commit_charge(folio, memcg); 4493 memcg1_commit_charge(folio, memcg); 4494 out: 4495 return ret; 4496 } 4497 4498 int __mem_cgroup_charge(struct folio *folio, struct mm_struct *mm, gfp_t gfp) 4499 { 4500 struct mem_cgroup *memcg; 4501 int ret; 4502 4503 memcg = get_mem_cgroup_from_mm(mm); 4504 ret = charge_memcg(folio, memcg, gfp); 4505 css_put(&memcg->css); 4506 4507 return ret; 4508 } 4509 4510 /** 4511 * mem_cgroup_charge_hugetlb - charge the memcg for a hugetlb folio 4512 * @folio: folio being charged 4513 * @gfp: reclaim mode 4514 * 4515 * This function is called when allocating a huge page folio, after the page has 4516 * already been obtained and charged to the appropriate hugetlb cgroup 4517 * controller (if it is enabled). 4518 * 4519 * Returns ENOMEM if the memcg is already full. 4520 * Returns 0 if either the charge was successful, or if we skip the charging. 4521 */ 4522 int mem_cgroup_charge_hugetlb(struct folio *folio, gfp_t gfp) 4523 { 4524 struct mem_cgroup *memcg = get_mem_cgroup_from_current(); 4525 int ret = 0; 4526 4527 /* 4528 * Even memcg does not account for hugetlb, we still want to update 4529 * system-level stats via lruvec_stat_mod_folio. Return 0, and skip 4530 * charging the memcg. 4531 */ 4532 if (mem_cgroup_disabled() || !memcg_accounts_hugetlb() || 4533 !memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys)) 4534 goto out; 4535 4536 if (charge_memcg(folio, memcg, gfp)) 4537 ret = -ENOMEM; 4538 4539 out: 4540 mem_cgroup_put(memcg); 4541 return ret; 4542 } 4543 4544 /** 4545 * mem_cgroup_swapin_charge_folio - Charge a newly allocated folio for swapin. 4546 * @folio: folio to charge. 4547 * @mm: mm context of the victim 4548 * @gfp: reclaim mode 4549 * @entry: swap entry for which the folio is allocated 4550 * 4551 * This function charges a folio allocated for swapin. Please call this before 4552 * adding the folio to the swapcache. 4553 * 4554 * Returns 0 on success. Otherwise, an error code is returned. 
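 *
 * The memcg to charge is looked up from the swap cgroup record attached
 * to @entry; if that cgroup is no longer online, the charge falls back
 * to the memcg of @mm.
 *
 * Illustrative call order for a swapin path (a sketch, not a verbatim
 * caller; the swapcache helper below is a stand-in name):
 *
 *	folio = folio_alloc(gfp, 0);
 *	if (mem_cgroup_swapin_charge_folio(folio, mm, gfp, entry))
 *		goto out_free;
 *	add_folio_to_swap_cache(folio, entry);
 *	mem_cgroup_swapin_uncharge_swap(entry, folio_nr_pages(folio));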
4555 */ 4556 int mem_cgroup_swapin_charge_folio(struct folio *folio, struct mm_struct *mm, 4557 gfp_t gfp, swp_entry_t entry) 4558 { 4559 struct mem_cgroup *memcg; 4560 unsigned short id; 4561 int ret; 4562 4563 if (mem_cgroup_disabled()) 4564 return 0; 4565 4566 id = lookup_swap_cgroup_id(entry); 4567 rcu_read_lock(); 4568 memcg = mem_cgroup_from_id(id); 4569 if (!memcg || !css_tryget_online(&memcg->css)) 4570 memcg = get_mem_cgroup_from_mm(mm); 4571 rcu_read_unlock(); 4572 4573 ret = charge_memcg(folio, memcg, gfp); 4574 4575 css_put(&memcg->css); 4576 return ret; 4577 } 4578 4579 /* 4580 * mem_cgroup_swapin_uncharge_swap - uncharge swap slot 4581 * @entry: the first swap entry for which the pages are charged 4582 * @nr_pages: number of pages which will be uncharged 4583 * 4584 * Call this function after successfully adding the charged page to swapcache. 4585 * 4586 * Note: This function assumes the page for which swap slot is being uncharged 4587 * is order 0 page. 4588 */ 4589 void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry, unsigned int nr_pages) 4590 { 4591 /* 4592 * Cgroup1's unified memory+swap counter has been charged with the 4593 * new swapcache page, finish the transfer by uncharging the swap 4594 * slot. The swap slot would also get uncharged when it dies, but 4595 * it can stick around indefinitely and we'd count the page twice 4596 * the entire time. 4597 * 4598 * Cgroup2 has separate resource counters for memory and swap, 4599 * so this is a non-issue here. Memory and swap charge lifetimes 4600 * correspond 1:1 to page and swap slot lifetimes: we charge the 4601 * page to memory here, and uncharge swap when the slot is freed. 4602 */ 4603 if (do_memsw_account()) { 4604 /* 4605 * The swap entry might not get freed for a long time, 4606 * let's not wait for it. The page already received a 4607 * memory+swap charge, drop the swap entry duplicate. 4608 */ 4609 mem_cgroup_uncharge_swap(entry, nr_pages); 4610 } 4611 } 4612 4613 struct uncharge_gather { 4614 struct mem_cgroup *memcg; 4615 unsigned long nr_memory; 4616 unsigned long pgpgout; 4617 unsigned long nr_kmem; 4618 int nid; 4619 }; 4620 4621 static inline void uncharge_gather_clear(struct uncharge_gather *ug) 4622 { 4623 memset(ug, 0, sizeof(*ug)); 4624 } 4625 4626 static void uncharge_batch(const struct uncharge_gather *ug) 4627 { 4628 if (ug->nr_memory) { 4629 page_counter_uncharge(&ug->memcg->memory, ug->nr_memory); 4630 if (do_memsw_account()) 4631 page_counter_uncharge(&ug->memcg->memsw, ug->nr_memory); 4632 if (ug->nr_kmem) { 4633 mod_memcg_state(ug->memcg, MEMCG_KMEM, -ug->nr_kmem); 4634 memcg1_account_kmem(ug->memcg, -ug->nr_kmem); 4635 } 4636 memcg1_oom_recover(ug->memcg); 4637 } 4638 4639 memcg1_uncharge_batch(ug->memcg, ug->pgpgout, ug->nr_memory, ug->nid); 4640 4641 /* drop reference from uncharge_folio */ 4642 css_put(&ug->memcg->css); 4643 } 4644 4645 static void uncharge_folio(struct folio *folio, struct uncharge_gather *ug) 4646 { 4647 long nr_pages; 4648 struct mem_cgroup *memcg; 4649 struct obj_cgroup *objcg; 4650 4651 VM_BUG_ON_FOLIO(folio_test_lru(folio), folio); 4652 4653 /* 4654 * Nobody should be changing or seriously looking at 4655 * folio memcg or objcg at this point, we have fully 4656 * exclusive access to the folio. 4657 */ 4658 if (folio_memcg_kmem(folio)) { 4659 objcg = __folio_objcg(folio); 4660 /* 4661 * This get matches the put at the end of the function and 4662 * kmem pages do not hold memcg references anymore. 
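 * For LRU folios, the css reference dropped at the end of this function
 * is the one the folio itself acquired when it was charged.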
4663 */ 4664 memcg = get_mem_cgroup_from_objcg(objcg); 4665 } else { 4666 memcg = __folio_memcg(folio); 4667 } 4668 4669 if (!memcg) 4670 return; 4671 4672 if (ug->memcg != memcg) { 4673 if (ug->memcg) { 4674 uncharge_batch(ug); 4675 uncharge_gather_clear(ug); 4676 } 4677 ug->memcg = memcg; 4678 ug->nid = folio_nid(folio); 4679 4680 /* pairs with css_put in uncharge_batch */ 4681 css_get(&memcg->css); 4682 } 4683 4684 nr_pages = folio_nr_pages(folio); 4685 4686 if (folio_memcg_kmem(folio)) { 4687 ug->nr_memory += nr_pages; 4688 ug->nr_kmem += nr_pages; 4689 4690 folio->memcg_data = 0; 4691 obj_cgroup_put(objcg); 4692 } else { 4693 /* LRU pages aren't accounted at the root level */ 4694 if (!mem_cgroup_is_root(memcg)) 4695 ug->nr_memory += nr_pages; 4696 ug->pgpgout++; 4697 4698 WARN_ON_ONCE(folio_unqueue_deferred_split(folio)); 4699 folio->memcg_data = 0; 4700 } 4701 4702 css_put(&memcg->css); 4703 } 4704 4705 void __mem_cgroup_uncharge(struct folio *folio) 4706 { 4707 struct uncharge_gather ug; 4708 4709 /* Don't touch folio->lru of any random page, pre-check: */ 4710 if (!folio_memcg_charged(folio)) 4711 return; 4712 4713 uncharge_gather_clear(&ug); 4714 uncharge_folio(folio, &ug); 4715 uncharge_batch(&ug); 4716 } 4717 4718 void __mem_cgroup_uncharge_folios(struct folio_batch *folios) 4719 { 4720 struct uncharge_gather ug; 4721 unsigned int i; 4722 4723 uncharge_gather_clear(&ug); 4724 for (i = 0; i < folios->nr; i++) 4725 uncharge_folio(folios->folios[i], &ug); 4726 if (ug.memcg) 4727 uncharge_batch(&ug); 4728 } 4729 4730 /** 4731 * mem_cgroup_replace_folio - Charge a folio's replacement. 4732 * @old: Currently circulating folio. 4733 * @new: Replacement folio. 4734 * 4735 * Charge @new as a replacement folio for @old. @old will 4736 * be uncharged upon free. 4737 * 4738 * Both folios must be locked, @new->mapping must be set up. 4739 */ 4740 void mem_cgroup_replace_folio(struct folio *old, struct folio *new) 4741 { 4742 struct mem_cgroup *memcg; 4743 long nr_pages = folio_nr_pages(new); 4744 4745 VM_BUG_ON_FOLIO(!folio_test_locked(old), old); 4746 VM_BUG_ON_FOLIO(!folio_test_locked(new), new); 4747 VM_BUG_ON_FOLIO(folio_test_anon(old) != folio_test_anon(new), new); 4748 VM_BUG_ON_FOLIO(folio_nr_pages(old) != nr_pages, new); 4749 4750 if (mem_cgroup_disabled()) 4751 return; 4752 4753 /* Page cache replacement: new folio already charged? */ 4754 if (folio_memcg_charged(new)) 4755 return; 4756 4757 memcg = folio_memcg(old); 4758 VM_WARN_ON_ONCE_FOLIO(!memcg, old); 4759 if (!memcg) 4760 return; 4761 4762 /* Force-charge the new page. The old one will be freed soon */ 4763 if (!mem_cgroup_is_root(memcg)) { 4764 page_counter_charge(&memcg->memory, nr_pages); 4765 if (do_memsw_account()) 4766 page_counter_charge(&memcg->memsw, nr_pages); 4767 } 4768 4769 css_get(&memcg->css); 4770 commit_charge(new, memcg); 4771 memcg1_commit_charge(new, memcg); 4772 } 4773 4774 /** 4775 * mem_cgroup_migrate - Transfer the memcg data from the old to the new folio. 4776 * @old: Currently circulating folio. 4777 * @new: Replacement folio. 4778 * 4779 * Transfer the memcg data from the old folio to the new folio for migration. 4780 * The old folio's data info will be cleared. Note that the memory counters 4781 * will remain unchanged throughout the process. 4782 * 4783 * Both folios must be locked, @new->mapping must be set up. 
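 *
 * Unlike mem_cgroup_replace_folio(), no new charge is taken here: the
 * existing charge and css reference simply follow the data to @new once
 * @old's memcg_data is cleared.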
4784 */ 4785 void mem_cgroup_migrate(struct folio *old, struct folio *new) 4786 { 4787 struct mem_cgroup *memcg; 4788 4789 VM_BUG_ON_FOLIO(!folio_test_locked(old), old); 4790 VM_BUG_ON_FOLIO(!folio_test_locked(new), new); 4791 VM_BUG_ON_FOLIO(folio_test_anon(old) != folio_test_anon(new), new); 4792 VM_BUG_ON_FOLIO(folio_nr_pages(old) != folio_nr_pages(new), new); 4793 VM_BUG_ON_FOLIO(folio_test_lru(old), old); 4794 4795 if (mem_cgroup_disabled()) 4796 return; 4797 4798 memcg = folio_memcg(old); 4799 /* 4800 * Note that it is normal to see !memcg for a hugetlb folio. 4801 * For e.g, itt could have been allocated when memory_hugetlb_accounting 4802 * was not selected. 4803 */ 4804 VM_WARN_ON_ONCE_FOLIO(!folio_test_hugetlb(old) && !memcg, old); 4805 if (!memcg) 4806 return; 4807 4808 /* Transfer the charge and the css ref */ 4809 commit_charge(new, memcg); 4810 4811 /* Warning should never happen, so don't worry about refcount non-0 */ 4812 WARN_ON_ONCE(folio_unqueue_deferred_split(old)); 4813 old->memcg_data = 0; 4814 } 4815 4816 DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key); 4817 EXPORT_SYMBOL(memcg_sockets_enabled_key); 4818 4819 void mem_cgroup_sk_alloc(struct sock *sk) 4820 { 4821 struct mem_cgroup *memcg; 4822 4823 if (!mem_cgroup_sockets_enabled) 4824 return; 4825 4826 /* Do not associate the sock with unrelated interrupted task's memcg. */ 4827 if (!in_task()) 4828 return; 4829 4830 rcu_read_lock(); 4831 memcg = mem_cgroup_from_task(current); 4832 if (mem_cgroup_is_root(memcg)) 4833 goto out; 4834 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg1_tcpmem_active(memcg)) 4835 goto out; 4836 if (css_tryget(&memcg->css)) 4837 sk->sk_memcg = memcg; 4838 out: 4839 rcu_read_unlock(); 4840 } 4841 4842 void mem_cgroup_sk_free(struct sock *sk) 4843 { 4844 if (sk->sk_memcg) 4845 css_put(&sk->sk_memcg->css); 4846 } 4847 4848 /** 4849 * mem_cgroup_charge_skmem - charge socket memory 4850 * @memcg: memcg to charge 4851 * @nr_pages: number of pages to charge 4852 * @gfp_mask: reclaim mode 4853 * 4854 * Charges @nr_pages to @memcg. Returns %true if the charge fit within 4855 * @memcg's configured limit, %false if it doesn't. 4856 */ 4857 bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages, 4858 gfp_t gfp_mask) 4859 { 4860 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) 4861 return memcg1_charge_skmem(memcg, nr_pages, gfp_mask); 4862 4863 if (try_charge(memcg, gfp_mask, nr_pages) == 0) { 4864 mod_memcg_state(memcg, MEMCG_SOCK, nr_pages); 4865 return true; 4866 } 4867 4868 return false; 4869 } 4870 4871 /** 4872 * mem_cgroup_uncharge_skmem - uncharge socket memory 4873 * @memcg: memcg to uncharge 4874 * @nr_pages: number of pages to uncharge 4875 */ 4876 void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages) 4877 { 4878 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) { 4879 memcg1_uncharge_skmem(memcg, nr_pages); 4880 return; 4881 } 4882 4883 mod_memcg_state(memcg, MEMCG_SOCK, -nr_pages); 4884 4885 refill_stock(memcg, nr_pages); 4886 } 4887 4888 static int __init cgroup_memory(char *s) 4889 { 4890 char *token; 4891 4892 while ((token = strsep(&s, ",")) != NULL) { 4893 if (!*token) 4894 continue; 4895 if (!strcmp(token, "nosocket")) 4896 cgroup_memory_nosocket = true; 4897 if (!strcmp(token, "nokmem")) 4898 cgroup_memory_nokmem = true; 4899 if (!strcmp(token, "nobpf")) 4900 cgroup_memory_nobpf = true; 4901 } 4902 return 1; 4903 } 4904 __setup("cgroup.memory=", cgroup_memory); 4905 4906 /* 4907 * subsys_initcall() for memory controller. 
4908 * 4909 * Some parts like memcg_hotplug_cpu_dead() have to be initialized from this 4910 * context because of lock dependencies (cgroup_lock -> cpu hotplug) but 4911 * basically everything that doesn't depend on a specific mem_cgroup structure 4912 * should be initialized from here. 4913 */ 4914 static int __init mem_cgroup_init(void) 4915 { 4916 int cpu; 4917 4918 /* 4919 * Currently s32 type (can refer to struct batched_lruvec_stat) is 4920 * used for per-memcg-per-cpu caching of per-node statistics. In order 4921 * to work fine, we should make sure that the overfill threshold can't 4922 * exceed S32_MAX / PAGE_SIZE. 4923 */ 4924 BUILD_BUG_ON(MEMCG_CHARGE_BATCH > S32_MAX / PAGE_SIZE); 4925 4926 cpuhp_setup_state_nocalls(CPUHP_MM_MEMCQ_DEAD, "mm/memctrl:dead", NULL, 4927 memcg_hotplug_cpu_dead); 4928 4929 for_each_possible_cpu(cpu) 4930 INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work, 4931 drain_local_stock); 4932 4933 return 0; 4934 } 4935 subsys_initcall(mem_cgroup_init); 4936 4937 #ifdef CONFIG_SWAP 4938 static struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg) 4939 { 4940 while (!refcount_inc_not_zero(&memcg->id.ref)) { 4941 /* 4942 * The root cgroup cannot be destroyed, so it's refcount must 4943 * always be >= 1. 4944 */ 4945 if (WARN_ON_ONCE(mem_cgroup_is_root(memcg))) { 4946 VM_BUG_ON(1); 4947 break; 4948 } 4949 memcg = parent_mem_cgroup(memcg); 4950 if (!memcg) 4951 memcg = root_mem_cgroup; 4952 } 4953 return memcg; 4954 } 4955 4956 /** 4957 * mem_cgroup_swapout - transfer a memsw charge to swap 4958 * @folio: folio whose memsw charge to transfer 4959 * @entry: swap entry to move the charge to 4960 * 4961 * Transfer the memsw charge of @folio to @entry. 4962 */ 4963 void mem_cgroup_swapout(struct folio *folio, swp_entry_t entry) 4964 { 4965 struct mem_cgroup *memcg, *swap_memcg; 4966 unsigned int nr_entries; 4967 4968 VM_BUG_ON_FOLIO(folio_test_lru(folio), folio); 4969 VM_BUG_ON_FOLIO(folio_ref_count(folio), folio); 4970 4971 if (mem_cgroup_disabled()) 4972 return; 4973 4974 if (!do_memsw_account()) 4975 return; 4976 4977 memcg = folio_memcg(folio); 4978 4979 VM_WARN_ON_ONCE_FOLIO(!memcg, folio); 4980 if (!memcg) 4981 return; 4982 4983 /* 4984 * In case the memcg owning these pages has been offlined and doesn't 4985 * have an ID allocated to it anymore, charge the closest online 4986 * ancestor for the swap instead and transfer the memory+swap charge. 4987 */ 4988 swap_memcg = mem_cgroup_id_get_online(memcg); 4989 nr_entries = folio_nr_pages(folio); 4990 /* Get references for the tail pages, too */ 4991 if (nr_entries > 1) 4992 mem_cgroup_id_get_many(swap_memcg, nr_entries - 1); 4993 mod_memcg_state(swap_memcg, MEMCG_SWAP, nr_entries); 4994 4995 swap_cgroup_record(folio, entry); 4996 4997 folio_unqueue_deferred_split(folio); 4998 folio->memcg_data = 0; 4999 5000 if (!mem_cgroup_is_root(memcg)) 5001 page_counter_uncharge(&memcg->memory, nr_entries); 5002 5003 if (memcg != swap_memcg) { 5004 if (!mem_cgroup_is_root(swap_memcg)) 5005 page_counter_charge(&swap_memcg->memsw, nr_entries); 5006 page_counter_uncharge(&memcg->memsw, nr_entries); 5007 } 5008 5009 memcg1_swapout(folio, memcg); 5010 css_put(&memcg->css); 5011 } 5012 5013 /** 5014 * __mem_cgroup_try_charge_swap - try charging swap space for a folio 5015 * @folio: folio being added to swap 5016 * @entry: swap entry to charge 5017 * 5018 * Try to charge @folio's memcg for the swap space at @entry. 5019 * 5020 * Returns 0 on success, -ENOMEM on failure. 
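 *
 * Under cgroup1's combined memory+swap (memsw) accounting this is a
 * no-op, since swap is already covered by the memsw charge. On failure,
 * the MEMCG_SWAP_MAX and MEMCG_SWAP_FAIL events are raised on the
 * cgroup being charged.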
5021 */ 5022 int __mem_cgroup_try_charge_swap(struct folio *folio, swp_entry_t entry) 5023 { 5024 unsigned int nr_pages = folio_nr_pages(folio); 5025 struct page_counter *counter; 5026 struct mem_cgroup *memcg; 5027 5028 if (do_memsw_account()) 5029 return 0; 5030 5031 memcg = folio_memcg(folio); 5032 5033 VM_WARN_ON_ONCE_FOLIO(!memcg, folio); 5034 if (!memcg) 5035 return 0; 5036 5037 if (!entry.val) { 5038 memcg_memory_event(memcg, MEMCG_SWAP_FAIL); 5039 return 0; 5040 } 5041 5042 memcg = mem_cgroup_id_get_online(memcg); 5043 5044 if (!mem_cgroup_is_root(memcg) && 5045 !page_counter_try_charge(&memcg->swap, nr_pages, &counter)) { 5046 memcg_memory_event(memcg, MEMCG_SWAP_MAX); 5047 memcg_memory_event(memcg, MEMCG_SWAP_FAIL); 5048 mem_cgroup_id_put(memcg); 5049 return -ENOMEM; 5050 } 5051 5052 /* Get references for the tail pages, too */ 5053 if (nr_pages > 1) 5054 mem_cgroup_id_get_many(memcg, nr_pages - 1); 5055 mod_memcg_state(memcg, MEMCG_SWAP, nr_pages); 5056 5057 swap_cgroup_record(folio, entry); 5058 5059 return 0; 5060 } 5061 5062 /** 5063 * __mem_cgroup_uncharge_swap - uncharge swap space 5064 * @entry: swap entry to uncharge 5065 * @nr_pages: the amount of swap space to uncharge 5066 */ 5067 void __mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages) 5068 { 5069 struct mem_cgroup *memcg; 5070 unsigned short id; 5071 5072 id = swap_cgroup_clear(entry, nr_pages); 5073 rcu_read_lock(); 5074 memcg = mem_cgroup_from_id(id); 5075 if (memcg) { 5076 if (!mem_cgroup_is_root(memcg)) { 5077 if (do_memsw_account()) 5078 page_counter_uncharge(&memcg->memsw, nr_pages); 5079 else 5080 page_counter_uncharge(&memcg->swap, nr_pages); 5081 } 5082 mod_memcg_state(memcg, MEMCG_SWAP, -nr_pages); 5083 mem_cgroup_id_put_many(memcg, nr_pages); 5084 } 5085 rcu_read_unlock(); 5086 } 5087 5088 long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg) 5089 { 5090 long nr_swap_pages = get_nr_swap_pages(); 5091 5092 if (mem_cgroup_disabled() || do_memsw_account()) 5093 return nr_swap_pages; 5094 for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg)) 5095 nr_swap_pages = min_t(long, nr_swap_pages, 5096 READ_ONCE(memcg->swap.max) - 5097 page_counter_read(&memcg->swap)); 5098 return nr_swap_pages; 5099 } 5100 5101 bool mem_cgroup_swap_full(struct folio *folio) 5102 { 5103 struct mem_cgroup *memcg; 5104 5105 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); 5106 5107 if (vm_swap_full()) 5108 return true; 5109 if (do_memsw_account()) 5110 return false; 5111 5112 memcg = folio_memcg(folio); 5113 if (!memcg) 5114 return false; 5115 5116 for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg)) { 5117 unsigned long usage = page_counter_read(&memcg->swap); 5118 5119 if (usage * 2 >= READ_ONCE(memcg->swap.high) || 5120 usage * 2 >= READ_ONCE(memcg->swap.max)) 5121 return true; 5122 } 5123 5124 return false; 5125 } 5126 5127 static int __init setup_swap_account(char *s) 5128 { 5129 bool res; 5130 5131 if (!kstrtobool(s, &res) && !res) 5132 pr_warn_once("The swapaccount=0 commandline option is deprecated " 5133 "in favor of configuring swap control via cgroupfs. 
" 5134 "Please report your usecase to linux-mm@kvack.org if you " 5135 "depend on this functionality.\n"); 5136 return 1; 5137 } 5138 __setup("swapaccount=", setup_swap_account); 5139 5140 static u64 swap_current_read(struct cgroup_subsys_state *css, 5141 struct cftype *cft) 5142 { 5143 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 5144 5145 return (u64)page_counter_read(&memcg->swap) * PAGE_SIZE; 5146 } 5147 5148 static int swap_peak_show(struct seq_file *sf, void *v) 5149 { 5150 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(sf)); 5151 5152 return peak_show(sf, v, &memcg->swap); 5153 } 5154 5155 static ssize_t swap_peak_write(struct kernfs_open_file *of, char *buf, 5156 size_t nbytes, loff_t off) 5157 { 5158 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 5159 5160 return peak_write(of, buf, nbytes, off, &memcg->swap, 5161 &memcg->swap_peaks); 5162 } 5163 5164 static int swap_high_show(struct seq_file *m, void *v) 5165 { 5166 return seq_puts_memcg_tunable(m, 5167 READ_ONCE(mem_cgroup_from_seq(m)->swap.high)); 5168 } 5169 5170 static ssize_t swap_high_write(struct kernfs_open_file *of, 5171 char *buf, size_t nbytes, loff_t off) 5172 { 5173 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 5174 unsigned long high; 5175 int err; 5176 5177 buf = strstrip(buf); 5178 err = page_counter_memparse(buf, "max", &high); 5179 if (err) 5180 return err; 5181 5182 page_counter_set_high(&memcg->swap, high); 5183 5184 return nbytes; 5185 } 5186 5187 static int swap_max_show(struct seq_file *m, void *v) 5188 { 5189 return seq_puts_memcg_tunable(m, 5190 READ_ONCE(mem_cgroup_from_seq(m)->swap.max)); 5191 } 5192 5193 static ssize_t swap_max_write(struct kernfs_open_file *of, 5194 char *buf, size_t nbytes, loff_t off) 5195 { 5196 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 5197 unsigned long max; 5198 int err; 5199 5200 buf = strstrip(buf); 5201 err = page_counter_memparse(buf, "max", &max); 5202 if (err) 5203 return err; 5204 5205 xchg(&memcg->swap.max, max); 5206 5207 return nbytes; 5208 } 5209 5210 static int swap_events_show(struct seq_file *m, void *v) 5211 { 5212 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 5213 5214 seq_printf(m, "high %lu\n", 5215 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_HIGH])); 5216 seq_printf(m, "max %lu\n", 5217 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_MAX])); 5218 seq_printf(m, "fail %lu\n", 5219 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_FAIL])); 5220 5221 return 0; 5222 } 5223 5224 static struct cftype swap_files[] = { 5225 { 5226 .name = "swap.current", 5227 .flags = CFTYPE_NOT_ON_ROOT, 5228 .read_u64 = swap_current_read, 5229 }, 5230 { 5231 .name = "swap.high", 5232 .flags = CFTYPE_NOT_ON_ROOT, 5233 .seq_show = swap_high_show, 5234 .write = swap_high_write, 5235 }, 5236 { 5237 .name = "swap.max", 5238 .flags = CFTYPE_NOT_ON_ROOT, 5239 .seq_show = swap_max_show, 5240 .write = swap_max_write, 5241 }, 5242 { 5243 .name = "swap.peak", 5244 .flags = CFTYPE_NOT_ON_ROOT, 5245 .open = peak_open, 5246 .release = peak_release, 5247 .seq_show = swap_peak_show, 5248 .write = swap_peak_write, 5249 }, 5250 { 5251 .name = "swap.events", 5252 .flags = CFTYPE_NOT_ON_ROOT, 5253 .file_offset = offsetof(struct mem_cgroup, swap_events_file), 5254 .seq_show = swap_events_show, 5255 }, 5256 { } /* terminate */ 5257 }; 5258 5259 #ifdef CONFIG_ZSWAP 5260 /** 5261 * obj_cgroup_may_zswap - check if this cgroup can zswap 5262 * @objcg: the object cgroup 5263 * 5264 * Check if the hierarchical zswap limit has been reached. 
5265 * 5266 * This doesn't check for specific headroom, and it is not atomic 5267 * either. But with zswap, the size of the allocation is only known 5268 * once compression has occurred, and this optimistic pre-check avoids 5269 * spending cycles on compression when there is already no room left 5270 * or zswap is disabled altogether somewhere in the hierarchy. 5271 */ 5272 bool obj_cgroup_may_zswap(struct obj_cgroup *objcg) 5273 { 5274 struct mem_cgroup *memcg, *original_memcg; 5275 bool ret = true; 5276 5277 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) 5278 return true; 5279 5280 original_memcg = get_mem_cgroup_from_objcg(objcg); 5281 for (memcg = original_memcg; !mem_cgroup_is_root(memcg); 5282 memcg = parent_mem_cgroup(memcg)) { 5283 unsigned long max = READ_ONCE(memcg->zswap_max); 5284 unsigned long pages; 5285 5286 if (max == PAGE_COUNTER_MAX) 5287 continue; 5288 if (max == 0) { 5289 ret = false; 5290 break; 5291 } 5292 5293 /* Force flush to get accurate stats for charging */ 5294 __mem_cgroup_flush_stats(memcg, true); 5295 pages = memcg_page_state(memcg, MEMCG_ZSWAP_B) / PAGE_SIZE; 5296 if (pages < max) 5297 continue; 5298 ret = false; 5299 break; 5300 } 5301 mem_cgroup_put(original_memcg); 5302 return ret; 5303 } 5304 5305 /** 5306 * obj_cgroup_charge_zswap - charge compression backend memory 5307 * @objcg: the object cgroup 5308 * @size: size of compressed object 5309 * 5310 * This forces the charge after obj_cgroup_may_zswap() allowed 5311 * compression and storage in zwap for this cgroup to go ahead. 5312 */ 5313 void obj_cgroup_charge_zswap(struct obj_cgroup *objcg, size_t size) 5314 { 5315 struct mem_cgroup *memcg; 5316 5317 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) 5318 return; 5319 5320 VM_WARN_ON_ONCE(!(current->flags & PF_MEMALLOC)); 5321 5322 /* PF_MEMALLOC context, charging must succeed */ 5323 if (obj_cgroup_charge(objcg, GFP_KERNEL, size)) 5324 VM_WARN_ON_ONCE(1); 5325 5326 rcu_read_lock(); 5327 memcg = obj_cgroup_memcg(objcg); 5328 mod_memcg_state(memcg, MEMCG_ZSWAP_B, size); 5329 mod_memcg_state(memcg, MEMCG_ZSWAPPED, 1); 5330 rcu_read_unlock(); 5331 } 5332 5333 /** 5334 * obj_cgroup_uncharge_zswap - uncharge compression backend memory 5335 * @objcg: the object cgroup 5336 * @size: size of compressed object 5337 * 5338 * Uncharges zswap memory on page in. 
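 * This undoes the byte charge and the MEMCG_ZSWAP_B / MEMCG_ZSWAPPED
 * accounting added by obj_cgroup_charge_zswap().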
5339 */ 5340 void obj_cgroup_uncharge_zswap(struct obj_cgroup *objcg, size_t size) 5341 { 5342 struct mem_cgroup *memcg; 5343 5344 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) 5345 return; 5346 5347 obj_cgroup_uncharge(objcg, size); 5348 5349 rcu_read_lock(); 5350 memcg = obj_cgroup_memcg(objcg); 5351 mod_memcg_state(memcg, MEMCG_ZSWAP_B, -size); 5352 mod_memcg_state(memcg, MEMCG_ZSWAPPED, -1); 5353 rcu_read_unlock(); 5354 } 5355 5356 bool mem_cgroup_zswap_writeback_enabled(struct mem_cgroup *memcg) 5357 { 5358 /* if zswap is disabled, do not block pages going to the swapping device */ 5359 if (!zswap_is_enabled()) 5360 return true; 5361 5362 for (; memcg; memcg = parent_mem_cgroup(memcg)) 5363 if (!READ_ONCE(memcg->zswap_writeback)) 5364 return false; 5365 5366 return true; 5367 } 5368 5369 static u64 zswap_current_read(struct cgroup_subsys_state *css, 5370 struct cftype *cft) 5371 { 5372 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 5373 5374 mem_cgroup_flush_stats(memcg); 5375 return memcg_page_state(memcg, MEMCG_ZSWAP_B); 5376 } 5377 5378 static int zswap_max_show(struct seq_file *m, void *v) 5379 { 5380 return seq_puts_memcg_tunable(m, 5381 READ_ONCE(mem_cgroup_from_seq(m)->zswap_max)); 5382 } 5383 5384 static ssize_t zswap_max_write(struct kernfs_open_file *of, 5385 char *buf, size_t nbytes, loff_t off) 5386 { 5387 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 5388 unsigned long max; 5389 int err; 5390 5391 buf = strstrip(buf); 5392 err = page_counter_memparse(buf, "max", &max); 5393 if (err) 5394 return err; 5395 5396 xchg(&memcg->zswap_max, max); 5397 5398 return nbytes; 5399 } 5400 5401 static int zswap_writeback_show(struct seq_file *m, void *v) 5402 { 5403 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 5404 5405 seq_printf(m, "%d\n", READ_ONCE(memcg->zswap_writeback)); 5406 return 0; 5407 } 5408 5409 static ssize_t zswap_writeback_write(struct kernfs_open_file *of, 5410 char *buf, size_t nbytes, loff_t off) 5411 { 5412 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 5413 int zswap_writeback; 5414 ssize_t parse_ret = kstrtoint(strstrip(buf), 0, &zswap_writeback); 5415 5416 if (parse_ret) 5417 return parse_ret; 5418 5419 if (zswap_writeback != 0 && zswap_writeback != 1) 5420 return -EINVAL; 5421 5422 WRITE_ONCE(memcg->zswap_writeback, zswap_writeback); 5423 return nbytes; 5424 } 5425 5426 static struct cftype zswap_files[] = { 5427 { 5428 .name = "zswap.current", 5429 .flags = CFTYPE_NOT_ON_ROOT, 5430 .read_u64 = zswap_current_read, 5431 }, 5432 { 5433 .name = "zswap.max", 5434 .flags = CFTYPE_NOT_ON_ROOT, 5435 .seq_show = zswap_max_show, 5436 .write = zswap_max_write, 5437 }, 5438 { 5439 .name = "zswap.writeback", 5440 .seq_show = zswap_writeback_show, 5441 .write = zswap_writeback_write, 5442 }, 5443 { } /* terminate */ 5444 }; 5445 #endif /* CONFIG_ZSWAP */ 5446 5447 static int __init mem_cgroup_swap_init(void) 5448 { 5449 if (mem_cgroup_disabled()) 5450 return 0; 5451 5452 WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys, swap_files)); 5453 #ifdef CONFIG_MEMCG_V1 5454 WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys, memsw_files)); 5455 #endif 5456 #ifdef CONFIG_ZSWAP 5457 WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys, zswap_files)); 5458 #endif 5459 return 0; 5460 } 5461 subsys_initcall(mem_cgroup_swap_init); 5462 5463 #endif /* CONFIG_SWAP */ 5464