1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* memcontrol.c - Memory Controller 3 * 4 * Copyright IBM Corporation, 2007 5 * Author Balbir Singh <balbir@linux.vnet.ibm.com> 6 * 7 * Copyright 2007 OpenVZ SWsoft Inc 8 * Author: Pavel Emelianov <xemul@openvz.org> 9 * 10 * Memory thresholds 11 * Copyright (C) 2009 Nokia Corporation 12 * Author: Kirill A. Shutemov 13 * 14 * Kernel Memory Controller 15 * Copyright (C) 2012 Parallels Inc. and Google Inc. 16 * Authors: Glauber Costa and Suleiman Souhlal 17 * 18 * Native page reclaim 19 * Charge lifetime sanitation 20 * Lockless page tracking & accounting 21 * Unified hierarchy configuration model 22 * Copyright (C) 2015 Red Hat, Inc., Johannes Weiner 23 * 24 * Per memcg lru locking 25 * Copyright (C) 2020 Alibaba, Inc, Alex Shi 26 */ 27 28 #include <linux/cgroup-defs.h> 29 #include <linux/page_counter.h> 30 #include <linux/memcontrol.h> 31 #include <linux/cgroup.h> 32 #include <linux/sched/mm.h> 33 #include <linux/shmem_fs.h> 34 #include <linux/hugetlb.h> 35 #include <linux/pagemap.h> 36 #include <linux/pagevec.h> 37 #include <linux/vm_event_item.h> 38 #include <linux/smp.h> 39 #include <linux/page-flags.h> 40 #include <linux/backing-dev.h> 41 #include <linux/bit_spinlock.h> 42 #include <linux/rcupdate.h> 43 #include <linux/limits.h> 44 #include <linux/export.h> 45 #include <linux/list.h> 46 #include <linux/mutex.h> 47 #include <linux/rbtree.h> 48 #include <linux/slab.h> 49 #include <linux/swapops.h> 50 #include <linux/spinlock.h> 51 #include <linux/fs.h> 52 #include <linux/seq_file.h> 53 #include <linux/parser.h> 54 #include <linux/vmpressure.h> 55 #include <linux/memremap.h> 56 #include <linux/mm_inline.h> 57 #include <linux/swap_cgroup.h> 58 #include <linux/cpu.h> 59 #include <linux/oom.h> 60 #include <linux/lockdep.h> 61 #include <linux/resume_user_mode.h> 62 #include <linux/psi.h> 63 #include <linux/seq_buf.h> 64 #include <linux/sched/isolation.h> 65 #include <linux/kmemleak.h> 66 #include "internal.h" 67 #include <net/sock.h> 68 #include <net/ip.h> 69 #include "slab.h" 70 #include "memcontrol-v1.h" 71 72 #include <linux/uaccess.h> 73 74 #define CREATE_TRACE_POINTS 75 #include <trace/events/memcg.h> 76 #undef CREATE_TRACE_POINTS 77 78 #include <trace/events/vmscan.h> 79 80 struct cgroup_subsys memory_cgrp_subsys __read_mostly; 81 EXPORT_SYMBOL(memory_cgrp_subsys); 82 83 struct mem_cgroup *root_mem_cgroup __read_mostly; 84 85 /* Active memory cgroup to use from an interrupt context */ 86 DEFINE_PER_CPU(struct mem_cgroup *, int_active_memcg); 87 EXPORT_PER_CPU_SYMBOL_GPL(int_active_memcg); 88 89 /* Socket memory accounting disabled? */ 90 static bool cgroup_memory_nosocket __ro_after_init; 91 92 /* Kernel memory accounting disabled? */ 93 static bool cgroup_memory_nokmem __ro_after_init; 94 95 /* BPF memory accounting disabled? */ 96 static bool cgroup_memory_nobpf __ro_after_init; 97 98 #ifdef CONFIG_CGROUP_WRITEBACK 99 static DECLARE_WAIT_QUEUE_HEAD(memcg_cgwb_frn_waitq); 100 #endif 101 102 static inline bool task_is_dying(void) 103 { 104 return tsk_is_oom_victim(current) || fatal_signal_pending(current) || 105 (current->flags & PF_EXITING); 106 } 107 108 /* Some nice accessors for the vmpressure. 
 */
struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
{
	if (!memcg)
		memcg = root_mem_cgroup;
	return &memcg->vmpressure;
}

struct mem_cgroup *vmpressure_to_memcg(struct vmpressure *vmpr)
{
	return container_of(vmpr, struct mem_cgroup, vmpressure);
}

#define SEQ_BUF_SIZE SZ_4K
#define CURRENT_OBJCG_UPDATE_BIT 0
#define CURRENT_OBJCG_UPDATE_FLAG (1UL << CURRENT_OBJCG_UPDATE_BIT)

static DEFINE_SPINLOCK(objcg_lock);

bool mem_cgroup_kmem_disabled(void)
{
	return cgroup_memory_nokmem;
}

static void obj_cgroup_uncharge_pages(struct obj_cgroup *objcg,
				      unsigned int nr_pages);

static void obj_cgroup_release(struct percpu_ref *ref)
{
	struct obj_cgroup *objcg = container_of(ref, struct obj_cgroup, refcnt);
	unsigned int nr_bytes;
	unsigned int nr_pages;
	unsigned long flags;

	/*
	 * At this point all allocated objects are freed, and
	 * objcg->nr_charged_bytes can't have an arbitrary byte value.
	 * However, it can be PAGE_SIZE or (x * PAGE_SIZE).
	 *
	 * The following sequence can lead to it:
	 * 1) CPU0: objcg == stock->cached_objcg
	 * 2) CPU1: we do a small allocation (e.g. 92 bytes),
	 *          PAGE_SIZE bytes are charged
	 * 3) CPU1: a process from another memcg is allocating something,
	 *          the stock is flushed,
	 *          objcg->nr_charged_bytes = PAGE_SIZE - 92
	 * 4) CPU0: we release this object,
	 *          92 bytes are added to stock->nr_bytes
	 * 5) CPU0: stock is flushed,
	 *          92 bytes are added to objcg->nr_charged_bytes
	 *
	 * As a result, nr_charged_bytes == PAGE_SIZE.
	 * This page will be uncharged in obj_cgroup_release().
	 */
	nr_bytes = atomic_read(&objcg->nr_charged_bytes);
	WARN_ON_ONCE(nr_bytes & (PAGE_SIZE - 1));
	nr_pages = nr_bytes >> PAGE_SHIFT;

	if (nr_pages)
		obj_cgroup_uncharge_pages(objcg, nr_pages);

	spin_lock_irqsave(&objcg_lock, flags);
	list_del(&objcg->list);
	spin_unlock_irqrestore(&objcg_lock, flags);

	percpu_ref_exit(ref);
	kfree_rcu(objcg, rcu);
}

static struct obj_cgroup *obj_cgroup_alloc(void)
{
	struct obj_cgroup *objcg;
	int ret;

	objcg = kzalloc(sizeof(struct obj_cgroup), GFP_KERNEL);
	if (!objcg)
		return NULL;

	ret = percpu_ref_init(&objcg->refcnt, obj_cgroup_release, 0,
			      GFP_KERNEL);
	if (ret) {
		kfree(objcg);
		return NULL;
	}
	INIT_LIST_HEAD(&objcg->list);
	return objcg;
}

static void memcg_reparent_objcgs(struct mem_cgroup *memcg,
				  struct mem_cgroup *parent)
{
	struct obj_cgroup *objcg, *iter;

	objcg = rcu_replace_pointer(memcg->objcg, NULL, true);

	spin_lock_irq(&objcg_lock);

	/* 1) Ready to reparent active objcg. */
	list_add(&objcg->list, &memcg->objcg_list);
	/* 2) Reparent active objcg and already reparented objcgs to parent. */
	list_for_each_entry(iter, &memcg->objcg_list, list)
		WRITE_ONCE(iter->memcg, parent);
	/* 3) Move already reparented objcgs to the parent's list */
	list_splice(&memcg->objcg_list, &parent->objcg_list);

	spin_unlock_irq(&objcg_lock);

	percpu_ref_kill(&objcg->refcnt);
}

/*
 * A lot of the calls to the cache allocation functions are expected to be
 * inlined by the compiler.
Since the calls to memcg_slab_post_alloc_hook() are 221 * conditional to this static branch, we'll have to allow modules that does 222 * kmem_cache_alloc and the such to see this symbol as well 223 */ 224 DEFINE_STATIC_KEY_FALSE(memcg_kmem_online_key); 225 EXPORT_SYMBOL(memcg_kmem_online_key); 226 227 DEFINE_STATIC_KEY_FALSE(memcg_bpf_enabled_key); 228 EXPORT_SYMBOL(memcg_bpf_enabled_key); 229 230 /** 231 * mem_cgroup_css_from_folio - css of the memcg associated with a folio 232 * @folio: folio of interest 233 * 234 * If memcg is bound to the default hierarchy, css of the memcg associated 235 * with @folio is returned. The returned css remains associated with @folio 236 * until it is released. 237 * 238 * If memcg is bound to a traditional hierarchy, the css of root_mem_cgroup 239 * is returned. 240 */ 241 struct cgroup_subsys_state *mem_cgroup_css_from_folio(struct folio *folio) 242 { 243 struct mem_cgroup *memcg = folio_memcg(folio); 244 245 if (!memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys)) 246 memcg = root_mem_cgroup; 247 248 return &memcg->css; 249 } 250 251 /** 252 * page_cgroup_ino - return inode number of the memcg a page is charged to 253 * @page: the page 254 * 255 * Look up the closest online ancestor of the memory cgroup @page is charged to 256 * and return its inode number or 0 if @page is not charged to any cgroup. It 257 * is safe to call this function without holding a reference to @page. 258 * 259 * Note, this function is inherently racy, because there is nothing to prevent 260 * the cgroup inode from getting torn down and potentially reallocated a moment 261 * after page_cgroup_ino() returns, so it only should be used by callers that 262 * do not care (such as procfs interfaces). 263 */ 264 ino_t page_cgroup_ino(struct page *page) 265 { 266 struct mem_cgroup *memcg; 267 unsigned long ino = 0; 268 269 rcu_read_lock(); 270 /* page_folio() is racy here, but the entire function is racy anyway */ 271 memcg = folio_memcg_check(page_folio(page)); 272 273 while (memcg && !(memcg->css.flags & CSS_ONLINE)) 274 memcg = parent_mem_cgroup(memcg); 275 if (memcg) 276 ino = cgroup_ino(memcg->css.cgroup); 277 rcu_read_unlock(); 278 return ino; 279 } 280 281 /* Subset of node_stat_item for memcg stats */ 282 static const unsigned int memcg_node_stat_items[] = { 283 NR_INACTIVE_ANON, 284 NR_ACTIVE_ANON, 285 NR_INACTIVE_FILE, 286 NR_ACTIVE_FILE, 287 NR_UNEVICTABLE, 288 NR_SLAB_RECLAIMABLE_B, 289 NR_SLAB_UNRECLAIMABLE_B, 290 WORKINGSET_REFAULT_ANON, 291 WORKINGSET_REFAULT_FILE, 292 WORKINGSET_ACTIVATE_ANON, 293 WORKINGSET_ACTIVATE_FILE, 294 WORKINGSET_RESTORE_ANON, 295 WORKINGSET_RESTORE_FILE, 296 WORKINGSET_NODERECLAIM, 297 NR_ANON_MAPPED, 298 NR_FILE_MAPPED, 299 NR_FILE_PAGES, 300 NR_FILE_DIRTY, 301 NR_WRITEBACK, 302 NR_SHMEM, 303 NR_SHMEM_THPS, 304 NR_FILE_THPS, 305 NR_ANON_THPS, 306 NR_KERNEL_STACK_KB, 307 NR_PAGETABLE, 308 NR_SECONDARY_PAGETABLE, 309 #ifdef CONFIG_SWAP 310 NR_SWAPCACHE, 311 #endif 312 #ifdef CONFIG_NUMA_BALANCING 313 PGPROMOTE_SUCCESS, 314 #endif 315 PGDEMOTE_KSWAPD, 316 PGDEMOTE_DIRECT, 317 PGDEMOTE_KHUGEPAGED, 318 #ifdef CONFIG_HUGETLB_PAGE 319 NR_HUGETLB, 320 #endif 321 }; 322 323 static const unsigned int memcg_stat_items[] = { 324 MEMCG_SWAP, 325 MEMCG_SOCK, 326 MEMCG_PERCPU_B, 327 MEMCG_VMALLOC, 328 MEMCG_KMEM, 329 MEMCG_ZSWAP_B, 330 MEMCG_ZSWAPPED, 331 }; 332 333 #define NR_MEMCG_NODE_STAT_ITEMS ARRAY_SIZE(memcg_node_stat_items) 334 #define MEMCG_VMSTAT_SIZE (NR_MEMCG_NODE_STAT_ITEMS + \ 335 ARRAY_SIZE(memcg_stat_items)) 336 #define BAD_STAT_IDX(index) 
((u32)(index) >= U8_MAX) 337 static u8 mem_cgroup_stats_index[MEMCG_NR_STAT] __read_mostly; 338 339 static void init_memcg_stats(void) 340 { 341 u8 i, j = 0; 342 343 BUILD_BUG_ON(MEMCG_NR_STAT >= U8_MAX); 344 345 memset(mem_cgroup_stats_index, U8_MAX, sizeof(mem_cgroup_stats_index)); 346 347 for (i = 0; i < NR_MEMCG_NODE_STAT_ITEMS; ++i, ++j) 348 mem_cgroup_stats_index[memcg_node_stat_items[i]] = j; 349 350 for (i = 0; i < ARRAY_SIZE(memcg_stat_items); ++i, ++j) 351 mem_cgroup_stats_index[memcg_stat_items[i]] = j; 352 } 353 354 static inline int memcg_stats_index(int idx) 355 { 356 return mem_cgroup_stats_index[idx]; 357 } 358 359 struct lruvec_stats_percpu { 360 /* Local (CPU and cgroup) state */ 361 long state[NR_MEMCG_NODE_STAT_ITEMS]; 362 363 /* Delta calculation for lockless upward propagation */ 364 long state_prev[NR_MEMCG_NODE_STAT_ITEMS]; 365 }; 366 367 struct lruvec_stats { 368 /* Aggregated (CPU and subtree) state */ 369 long state[NR_MEMCG_NODE_STAT_ITEMS]; 370 371 /* Non-hierarchical (CPU aggregated) state */ 372 long state_local[NR_MEMCG_NODE_STAT_ITEMS]; 373 374 /* Pending child counts during tree propagation */ 375 long state_pending[NR_MEMCG_NODE_STAT_ITEMS]; 376 }; 377 378 unsigned long lruvec_page_state(struct lruvec *lruvec, enum node_stat_item idx) 379 { 380 struct mem_cgroup_per_node *pn; 381 long x; 382 int i; 383 384 if (mem_cgroup_disabled()) 385 return node_page_state(lruvec_pgdat(lruvec), idx); 386 387 i = memcg_stats_index(idx); 388 if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx)) 389 return 0; 390 391 pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec); 392 x = READ_ONCE(pn->lruvec_stats->state[i]); 393 #ifdef CONFIG_SMP 394 if (x < 0) 395 x = 0; 396 #endif 397 return x; 398 } 399 400 unsigned long lruvec_page_state_local(struct lruvec *lruvec, 401 enum node_stat_item idx) 402 { 403 struct mem_cgroup_per_node *pn; 404 long x; 405 int i; 406 407 if (mem_cgroup_disabled()) 408 return node_page_state(lruvec_pgdat(lruvec), idx); 409 410 i = memcg_stats_index(idx); 411 if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx)) 412 return 0; 413 414 pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec); 415 x = READ_ONCE(pn->lruvec_stats->state_local[i]); 416 #ifdef CONFIG_SMP 417 if (x < 0) 418 x = 0; 419 #endif 420 return x; 421 } 422 423 /* Subset of vm_event_item to report for memcg event stats */ 424 static const unsigned int memcg_vm_event_stat[] = { 425 #ifdef CONFIG_MEMCG_V1 426 PGPGIN, 427 PGPGOUT, 428 #endif 429 PSWPIN, 430 PSWPOUT, 431 PGSCAN_KSWAPD, 432 PGSCAN_DIRECT, 433 PGSCAN_KHUGEPAGED, 434 PGSTEAL_KSWAPD, 435 PGSTEAL_DIRECT, 436 PGSTEAL_KHUGEPAGED, 437 PGFAULT, 438 PGMAJFAULT, 439 PGREFILL, 440 PGACTIVATE, 441 PGDEACTIVATE, 442 PGLAZYFREE, 443 PGLAZYFREED, 444 #ifdef CONFIG_SWAP 445 SWPIN_ZERO, 446 SWPOUT_ZERO, 447 #endif 448 #ifdef CONFIG_ZSWAP 449 ZSWPIN, 450 ZSWPOUT, 451 ZSWPWB, 452 #endif 453 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 454 THP_FAULT_ALLOC, 455 THP_COLLAPSE_ALLOC, 456 THP_SWPOUT, 457 THP_SWPOUT_FALLBACK, 458 #endif 459 #ifdef CONFIG_NUMA_BALANCING 460 NUMA_PAGE_MIGRATE, 461 NUMA_PTE_UPDATES, 462 NUMA_HINT_FAULTS, 463 #endif 464 }; 465 466 #define NR_MEMCG_EVENTS ARRAY_SIZE(memcg_vm_event_stat) 467 static u8 mem_cgroup_events_index[NR_VM_EVENT_ITEMS] __read_mostly; 468 469 static void init_memcg_events(void) 470 { 471 u8 i; 472 473 BUILD_BUG_ON(NR_VM_EVENT_ITEMS >= U8_MAX); 474 475 memset(mem_cgroup_events_index, U8_MAX, 476 sizeof(mem_cgroup_events_index)); 477 478 for (i = 0; 
i < NR_MEMCG_EVENTS; ++i)
		mem_cgroup_events_index[memcg_vm_event_stat[i]] = i;
}

static inline int memcg_events_index(enum vm_event_item idx)
{
	return mem_cgroup_events_index[idx];
}

struct memcg_vmstats_percpu {
	/* Stats updates since the last flush */
	unsigned int stats_updates;

	/* Cached pointers for fast iteration in memcg_rstat_updated() */
	struct memcg_vmstats_percpu *parent;
	struct memcg_vmstats *vmstats;

	/* The above should fit a single cacheline for memcg_rstat_updated() */

	/* Local (CPU and cgroup) page state & events */
	long state[MEMCG_VMSTAT_SIZE];
	unsigned long events[NR_MEMCG_EVENTS];

	/* Delta calculation for lockless upward propagation */
	long state_prev[MEMCG_VMSTAT_SIZE];
	unsigned long events_prev[NR_MEMCG_EVENTS];
} ____cacheline_aligned;

struct memcg_vmstats {
	/* Aggregated (CPU and subtree) page state & events */
	long state[MEMCG_VMSTAT_SIZE];
	unsigned long events[NR_MEMCG_EVENTS];

	/* Non-hierarchical (CPU aggregated) page state & events */
	long state_local[MEMCG_VMSTAT_SIZE];
	unsigned long events_local[NR_MEMCG_EVENTS];

	/* Pending child counts during tree propagation */
	long state_pending[MEMCG_VMSTAT_SIZE];
	unsigned long events_pending[NR_MEMCG_EVENTS];

	/* Stats updates since the last flush */
	atomic64_t stats_updates;
};

/*
 * memcg and lruvec stats flushing
 *
 * Many codepaths leading to stats update or read are performance sensitive and
 * adding stats flushing in such codepaths is not desirable. So, to optimize the
 * flushing, the kernel does:
 *
 * 1) Periodically and asynchronously flush the stats every 2 seconds so that
 *    the rstat update tree does not grow unbounded.
 *
 * 2) Flush the stats synchronously on the reader side only when there are more
 *    than (MEMCG_CHARGE_BATCH * nr_cpus) update events. This optimization can
 *    leave the stats out of sync by at most (MEMCG_CHARGE_BATCH * nr_cpus)
 *    updates, but only for 2 seconds due to (1).
 */
static void flush_memcg_stats_dwork(struct work_struct *w);
static DECLARE_DEFERRABLE_WORK(stats_flush_dwork, flush_memcg_stats_dwork);
static u64 flush_last_time;

#define FLUSH_TIME (2UL*HZ)

/*
 * Accessors to ensure that preemption is disabled on PREEMPT_RT, where an
 * acquired spinlock_t lock does not imply disabled preemption. These functions
 * are never used in hardirq context on PREEMPT_RT, so disabling preemption
 * is sufficient.
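 *
 * A minimal usage sketch, mirroring the update helpers further down in this
 * file (illustrative only):
 *
 *	memcg_stats_lock();
 *	__this_cpu_add(memcg->vmstats_percpu->events[i], count);
 *	memcg_rstat_updated(memcg, count);
 *	memcg_stats_unlock();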
549 */ 550 static void memcg_stats_lock(void) 551 { 552 preempt_disable_nested(); 553 VM_WARN_ON_IRQS_ENABLED(); 554 } 555 556 static void __memcg_stats_lock(void) 557 { 558 preempt_disable_nested(); 559 } 560 561 static void memcg_stats_unlock(void) 562 { 563 preempt_enable_nested(); 564 } 565 566 567 static bool memcg_vmstats_needs_flush(struct memcg_vmstats *vmstats) 568 { 569 return atomic64_read(&vmstats->stats_updates) > 570 MEMCG_CHARGE_BATCH * num_online_cpus(); 571 } 572 573 static inline void memcg_rstat_updated(struct mem_cgroup *memcg, int val) 574 { 575 struct memcg_vmstats_percpu *statc; 576 int cpu = smp_processor_id(); 577 unsigned int stats_updates; 578 579 if (!val) 580 return; 581 582 cgroup_rstat_updated(memcg->css.cgroup, cpu); 583 statc = this_cpu_ptr(memcg->vmstats_percpu); 584 for (; statc; statc = statc->parent) { 585 stats_updates = READ_ONCE(statc->stats_updates) + abs(val); 586 WRITE_ONCE(statc->stats_updates, stats_updates); 587 if (stats_updates < MEMCG_CHARGE_BATCH) 588 continue; 589 590 /* 591 * If @memcg is already flush-able, increasing stats_updates is 592 * redundant. Avoid the overhead of the atomic update. 593 */ 594 if (!memcg_vmstats_needs_flush(statc->vmstats)) 595 atomic64_add(stats_updates, 596 &statc->vmstats->stats_updates); 597 WRITE_ONCE(statc->stats_updates, 0); 598 } 599 } 600 601 static void __mem_cgroup_flush_stats(struct mem_cgroup *memcg, bool force) 602 { 603 bool needs_flush = memcg_vmstats_needs_flush(memcg->vmstats); 604 605 trace_memcg_flush_stats(memcg, atomic64_read(&memcg->vmstats->stats_updates), 606 force, needs_flush); 607 608 if (!force && !needs_flush) 609 return; 610 611 if (mem_cgroup_is_root(memcg)) 612 WRITE_ONCE(flush_last_time, jiffies_64); 613 614 cgroup_rstat_flush(memcg->css.cgroup); 615 } 616 617 /* 618 * mem_cgroup_flush_stats - flush the stats of a memory cgroup subtree 619 * @memcg: root of the subtree to flush 620 * 621 * Flushing is serialized by the underlying global rstat lock. There is also a 622 * minimum amount of work to be done even if there are no stat updates to flush. 623 * Hence, we only flush the stats if the updates delta exceeds a threshold. This 624 * avoids unnecessary work and contention on the underlying lock. 625 */ 626 void mem_cgroup_flush_stats(struct mem_cgroup *memcg) 627 { 628 if (mem_cgroup_disabled()) 629 return; 630 631 if (!memcg) 632 memcg = root_mem_cgroup; 633 634 __mem_cgroup_flush_stats(memcg, false); 635 } 636 637 void mem_cgroup_flush_stats_ratelimited(struct mem_cgroup *memcg) 638 { 639 /* Only flush if the periodic flusher is one full cycle late */ 640 if (time_after64(jiffies_64, READ_ONCE(flush_last_time) + 2*FLUSH_TIME)) 641 mem_cgroup_flush_stats(memcg); 642 } 643 644 static void flush_memcg_stats_dwork(struct work_struct *w) 645 { 646 /* 647 * Deliberately ignore memcg_vmstats_needs_flush() here so that flushing 648 * in latency-sensitive paths is as cheap as possible. 
 */
	__mem_cgroup_flush_stats(root_mem_cgroup, true);
	queue_delayed_work(system_unbound_wq, &stats_flush_dwork, FLUSH_TIME);
}

unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
{
	long x;
	int i = memcg_stats_index(idx);

	if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx))
		return 0;

	x = READ_ONCE(memcg->vmstats->state[i]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static int memcg_page_state_unit(int item);

/*
 * Normalize the value passed into memcg_rstat_updated() to be in pages. Round
 * up non-zero sub-page updates to 1 page as zero page updates are ignored.
 */
static int memcg_state_val_in_pages(int idx, int val)
{
	int unit = memcg_page_state_unit(idx);

	if (!val || unit == PAGE_SIZE)
		return val;
	else
		return max(val * unit / PAGE_SIZE, 1UL);
}

/**
 * __mod_memcg_state - update cgroup memory statistics
 * @memcg: the memory cgroup
 * @idx: the stat item - can be enum memcg_stat_item or enum node_stat_item
 * @val: delta to add to the counter, can be negative
 */
void __mod_memcg_state(struct mem_cgroup *memcg, enum memcg_stat_item idx,
		       int val)
{
	int i = memcg_stats_index(idx);

	if (mem_cgroup_disabled())
		return;

	if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx))
		return;

	__this_cpu_add(memcg->vmstats_percpu->state[i], val);
	val = memcg_state_val_in_pages(idx, val);
	memcg_rstat_updated(memcg, val);
	trace_mod_memcg_state(memcg, idx, val);
}

#ifdef CONFIG_MEMCG_V1
/* idx can be of type enum memcg_stat_item or node_stat_item. */
unsigned long memcg_page_state_local(struct mem_cgroup *memcg, int idx)
{
	long x;
	int i = memcg_stats_index(idx);

	if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx))
		return 0;

	x = READ_ONCE(memcg->vmstats->state_local[i]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}
#endif

static void __mod_memcg_lruvec_state(struct lruvec *lruvec,
				     enum node_stat_item idx,
				     int val)
{
	struct mem_cgroup_per_node *pn;
	struct mem_cgroup *memcg;
	int i = memcg_stats_index(idx);

	if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx))
		return;

	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	memcg = pn->memcg;

	/*
	 * The callers from rmap rely on disabled preemption because they never
	 * update their counters from interrupt context. For those counters we
	 * check that the update is never performed from an interrupt context,
	 * while the other callers need to have interrupts disabled.
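	 *
	 * Roughly, the two caller classes look like this (an illustration, not
	 * an exhaustive list):
	 *
	 *	rmap, task context, preemption disabled:
	 *		__lruvec_stat_mod_folio(folio, NR_ANON_MAPPED, nr);
	 *	most other paths, interrupts disabled:
	 *		__lruvec_stat_mod_folio(folio, NR_FILE_DIRTY, nr);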
	 */
	__memcg_stats_lock();
	if (IS_ENABLED(CONFIG_DEBUG_VM)) {
		switch (idx) {
		case NR_ANON_MAPPED:
		case NR_FILE_MAPPED:
		case NR_ANON_THPS:
			WARN_ON_ONCE(!in_task());
			break;
		default:
			VM_WARN_ON_IRQS_ENABLED();
		}
	}

	/* Update memcg */
	__this_cpu_add(memcg->vmstats_percpu->state[i], val);

	/* Update lruvec */
	__this_cpu_add(pn->lruvec_stats_percpu->state[i], val);

	val = memcg_state_val_in_pages(idx, val);
	memcg_rstat_updated(memcg, val);
	trace_mod_memcg_lruvec_state(memcg, idx, val);
	memcg_stats_unlock();
}

/**
 * __mod_lruvec_state - update lruvec memory statistics
 * @lruvec: the lruvec
 * @idx: the stat item
 * @val: delta to add to the counter, can be negative
 *
 * The lruvec is the intersection of the NUMA node and a cgroup. This
 * function updates all three counters that are affected by a
 * change of state at this level: per-node, per-cgroup, per-lruvec.
 */
void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
			int val)
{
	/* Update node */
	__mod_node_page_state(lruvec_pgdat(lruvec), idx, val);

	/* Update memcg and lruvec */
	if (!mem_cgroup_disabled())
		__mod_memcg_lruvec_state(lruvec, idx, val);
}

void __lruvec_stat_mod_folio(struct folio *folio, enum node_stat_item idx,
			     int val)
{
	struct mem_cgroup *memcg;
	pg_data_t *pgdat = folio_pgdat(folio);
	struct lruvec *lruvec;

	rcu_read_lock();
	memcg = folio_memcg(folio);
	/* Untracked pages have no memcg, no lruvec. Update only the node */
	if (!memcg) {
		rcu_read_unlock();
		__mod_node_page_state(pgdat, idx, val);
		return;
	}

	lruvec = mem_cgroup_lruvec(memcg, pgdat);
	__mod_lruvec_state(lruvec, idx, val);
	rcu_read_unlock();
}
EXPORT_SYMBOL(__lruvec_stat_mod_folio);

void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val)
{
	pg_data_t *pgdat = page_pgdat(virt_to_page(p));
	struct mem_cgroup *memcg;
	struct lruvec *lruvec;

	rcu_read_lock();
	memcg = mem_cgroup_from_slab_obj(p);

	/*
	 * Untracked pages have no memcg and no lruvec, so update only the
	 * node. If the slab objects were reparented to the root memcg, the
	 * per-memcg vmstats still need to be updated when a slab object is
	 * freed, to keep them correct for the root memcg.
830 */ 831 if (!memcg) { 832 __mod_node_page_state(pgdat, idx, val); 833 } else { 834 lruvec = mem_cgroup_lruvec(memcg, pgdat); 835 __mod_lruvec_state(lruvec, idx, val); 836 } 837 rcu_read_unlock(); 838 } 839 840 /** 841 * __count_memcg_events - account VM events in a cgroup 842 * @memcg: the memory cgroup 843 * @idx: the event item 844 * @count: the number of events that occurred 845 */ 846 void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx, 847 unsigned long count) 848 { 849 int i = memcg_events_index(idx); 850 851 if (mem_cgroup_disabled()) 852 return; 853 854 if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx)) 855 return; 856 857 memcg_stats_lock(); 858 __this_cpu_add(memcg->vmstats_percpu->events[i], count); 859 memcg_rstat_updated(memcg, count); 860 trace_count_memcg_events(memcg, idx, count); 861 memcg_stats_unlock(); 862 } 863 864 unsigned long memcg_events(struct mem_cgroup *memcg, int event) 865 { 866 int i = memcg_events_index(event); 867 868 if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, event)) 869 return 0; 870 871 return READ_ONCE(memcg->vmstats->events[i]); 872 } 873 874 #ifdef CONFIG_MEMCG_V1 875 unsigned long memcg_events_local(struct mem_cgroup *memcg, int event) 876 { 877 int i = memcg_events_index(event); 878 879 if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, event)) 880 return 0; 881 882 return READ_ONCE(memcg->vmstats->events_local[i]); 883 } 884 #endif 885 886 struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p) 887 { 888 /* 889 * mm_update_next_owner() may clear mm->owner to NULL 890 * if it races with swapoff, page migration, etc. 891 * So this can be called with p == NULL. 892 */ 893 if (unlikely(!p)) 894 return NULL; 895 896 return mem_cgroup_from_css(task_css(p, memory_cgrp_id)); 897 } 898 EXPORT_SYMBOL(mem_cgroup_from_task); 899 900 static __always_inline struct mem_cgroup *active_memcg(void) 901 { 902 if (!in_task()) 903 return this_cpu_read(int_active_memcg); 904 else 905 return current->active_memcg; 906 } 907 908 /** 909 * get_mem_cgroup_from_mm: Obtain a reference on given mm_struct's memcg. 910 * @mm: mm from which memcg should be extracted. It can be NULL. 911 * 912 * Obtain a reference on mm->memcg and returns it if successful. If mm 913 * is NULL, then the memcg is chosen as follows: 914 * 1) The active memcg, if set. 915 * 2) current->mm->memcg, if available 916 * 3) root memcg 917 * If mem_cgroup is disabled, NULL is returned. 918 */ 919 struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm) 920 { 921 struct mem_cgroup *memcg; 922 923 if (mem_cgroup_disabled()) 924 return NULL; 925 926 /* 927 * Page cache insertions can happen without an 928 * actual mm context, e.g. during disk probing 929 * on boot, loopback IO, acct() writes etc. 930 * 931 * No need to css_get on root memcg as the reference 932 * counting is disabled on the root level in the 933 * cgroup core. See CSS_NO_REF. 
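	 *
	 * Caller-side pairing, as a sketch (assuming the NULL-tolerant
	 * mem_cgroup_put() helper; a put on the root css is a no-op because
	 * of CSS_NO_REF):
	 *
	 *	memcg = get_mem_cgroup_from_mm(mm);
	 *	... charge or account against memcg ...
	 *	mem_cgroup_put(memcg);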
934 */ 935 if (unlikely(!mm)) { 936 memcg = active_memcg(); 937 if (unlikely(memcg)) { 938 /* remote memcg must hold a ref */ 939 css_get(&memcg->css); 940 return memcg; 941 } 942 mm = current->mm; 943 if (unlikely(!mm)) 944 return root_mem_cgroup; 945 } 946 947 rcu_read_lock(); 948 do { 949 memcg = mem_cgroup_from_task(rcu_dereference(mm->owner)); 950 if (unlikely(!memcg)) 951 memcg = root_mem_cgroup; 952 } while (!css_tryget(&memcg->css)); 953 rcu_read_unlock(); 954 return memcg; 955 } 956 EXPORT_SYMBOL(get_mem_cgroup_from_mm); 957 958 /** 959 * get_mem_cgroup_from_current - Obtain a reference on current task's memcg. 960 */ 961 struct mem_cgroup *get_mem_cgroup_from_current(void) 962 { 963 struct mem_cgroup *memcg; 964 965 if (mem_cgroup_disabled()) 966 return NULL; 967 968 again: 969 rcu_read_lock(); 970 memcg = mem_cgroup_from_task(current); 971 if (!css_tryget(&memcg->css)) { 972 rcu_read_unlock(); 973 goto again; 974 } 975 rcu_read_unlock(); 976 return memcg; 977 } 978 979 /** 980 * get_mem_cgroup_from_folio - Obtain a reference on a given folio's memcg. 981 * @folio: folio from which memcg should be extracted. 982 */ 983 struct mem_cgroup *get_mem_cgroup_from_folio(struct folio *folio) 984 { 985 struct mem_cgroup *memcg = folio_memcg(folio); 986 987 if (mem_cgroup_disabled()) 988 return NULL; 989 990 rcu_read_lock(); 991 if (!memcg || WARN_ON_ONCE(!css_tryget(&memcg->css))) 992 memcg = root_mem_cgroup; 993 rcu_read_unlock(); 994 return memcg; 995 } 996 997 /** 998 * mem_cgroup_iter - iterate over memory cgroup hierarchy 999 * @root: hierarchy root 1000 * @prev: previously returned memcg, NULL on first invocation 1001 * @reclaim: cookie for shared reclaim walks, NULL for full walks 1002 * 1003 * Returns references to children of the hierarchy below @root, or 1004 * @root itself, or %NULL after a full round-trip. 1005 * 1006 * Caller must pass the return value in @prev on subsequent 1007 * invocations for reference counting, or use mem_cgroup_iter_break() 1008 * to cancel a hierarchy walk before the round-trip is complete. 1009 * 1010 * Reclaimers can specify a node in @reclaim to divide up the memcgs 1011 * in the hierarchy among all concurrent reclaimers operating on the 1012 * same node. 1013 */ 1014 struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root, 1015 struct mem_cgroup *prev, 1016 struct mem_cgroup_reclaim_cookie *reclaim) 1017 { 1018 struct mem_cgroup_reclaim_iter *iter; 1019 struct cgroup_subsys_state *css; 1020 struct mem_cgroup *pos; 1021 struct mem_cgroup *next; 1022 1023 if (mem_cgroup_disabled()) 1024 return NULL; 1025 1026 if (!root) 1027 root = root_mem_cgroup; 1028 1029 rcu_read_lock(); 1030 restart: 1031 next = NULL; 1032 1033 if (reclaim) { 1034 int gen; 1035 int nid = reclaim->pgdat->node_id; 1036 1037 iter = &root->nodeinfo[nid]->iter; 1038 gen = atomic_read(&iter->generation); 1039 1040 /* 1041 * On start, join the current reclaim iteration cycle. 1042 * Exit when a concurrent walker completes it. 1043 */ 1044 if (!prev) 1045 reclaim->generation = gen; 1046 else if (reclaim->generation != gen) 1047 goto out_unlock; 1048 1049 pos = READ_ONCE(iter->position); 1050 } else 1051 pos = prev; 1052 1053 css = pos ? &pos->css : NULL; 1054 1055 while ((css = css_next_descendant_pre(css, &root->css))) { 1056 /* 1057 * Verify the css and acquire a reference. The root 1058 * is provided by the caller, so we know it's alive 1059 * and kicking, and don't take an extra reference. 
1060 */ 1061 if (css == &root->css || css_tryget(css)) 1062 break; 1063 } 1064 1065 next = mem_cgroup_from_css(css); 1066 1067 if (reclaim) { 1068 /* 1069 * The position could have already been updated by a competing 1070 * thread, so check that the value hasn't changed since we read 1071 * it to avoid reclaiming from the same cgroup twice. 1072 */ 1073 if (cmpxchg(&iter->position, pos, next) != pos) { 1074 if (css && css != &root->css) 1075 css_put(css); 1076 goto restart; 1077 } 1078 1079 if (!next) { 1080 atomic_inc(&iter->generation); 1081 1082 /* 1083 * Reclaimers share the hierarchy walk, and a 1084 * new one might jump in right at the end of 1085 * the hierarchy - make sure they see at least 1086 * one group and restart from the beginning. 1087 */ 1088 if (!prev) 1089 goto restart; 1090 } 1091 } 1092 1093 out_unlock: 1094 rcu_read_unlock(); 1095 if (prev && prev != root) 1096 css_put(&prev->css); 1097 1098 return next; 1099 } 1100 1101 /** 1102 * mem_cgroup_iter_break - abort a hierarchy walk prematurely 1103 * @root: hierarchy root 1104 * @prev: last visited hierarchy member as returned by mem_cgroup_iter() 1105 */ 1106 void mem_cgroup_iter_break(struct mem_cgroup *root, 1107 struct mem_cgroup *prev) 1108 { 1109 if (!root) 1110 root = root_mem_cgroup; 1111 if (prev && prev != root) 1112 css_put(&prev->css); 1113 } 1114 1115 static void __invalidate_reclaim_iterators(struct mem_cgroup *from, 1116 struct mem_cgroup *dead_memcg) 1117 { 1118 struct mem_cgroup_reclaim_iter *iter; 1119 struct mem_cgroup_per_node *mz; 1120 int nid; 1121 1122 for_each_node(nid) { 1123 mz = from->nodeinfo[nid]; 1124 iter = &mz->iter; 1125 cmpxchg(&iter->position, dead_memcg, NULL); 1126 } 1127 } 1128 1129 static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg) 1130 { 1131 struct mem_cgroup *memcg = dead_memcg; 1132 struct mem_cgroup *last; 1133 1134 do { 1135 __invalidate_reclaim_iterators(memcg, dead_memcg); 1136 last = memcg; 1137 } while ((memcg = parent_mem_cgroup(memcg))); 1138 1139 /* 1140 * When cgroup1 non-hierarchy mode is used, 1141 * parent_mem_cgroup() does not walk all the way up to the 1142 * cgroup root (root_mem_cgroup). So we have to handle 1143 * dead_memcg from cgroup root separately. 1144 */ 1145 if (!mem_cgroup_is_root(last)) 1146 __invalidate_reclaim_iterators(root_mem_cgroup, 1147 dead_memcg); 1148 } 1149 1150 /** 1151 * mem_cgroup_scan_tasks - iterate over tasks of a memory cgroup hierarchy 1152 * @memcg: hierarchy root 1153 * @fn: function to call for each task 1154 * @arg: argument passed to @fn 1155 * 1156 * This function iterates over tasks attached to @memcg or to any of its 1157 * descendants and calls @fn for each task. If @fn returns a non-zero 1158 * value, the function breaks the iteration loop. Otherwise, it will iterate 1159 * over all tasks and return 0. 1160 * 1161 * This function must not be called for the root memory cgroup. 
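 *
 * For example, the OOM killer evaluates every task in an OOMing subtree with,
 * roughly (a sketch of the caller, not code from this file):
 *
 *	mem_cgroup_scan_tasks(oc->memcg, oom_evaluate_task, oc);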
1162 */ 1163 void mem_cgroup_scan_tasks(struct mem_cgroup *memcg, 1164 int (*fn)(struct task_struct *, void *), void *arg) 1165 { 1166 struct mem_cgroup *iter; 1167 int ret = 0; 1168 int i = 0; 1169 1170 BUG_ON(mem_cgroup_is_root(memcg)); 1171 1172 for_each_mem_cgroup_tree(iter, memcg) { 1173 struct css_task_iter it; 1174 struct task_struct *task; 1175 1176 css_task_iter_start(&iter->css, CSS_TASK_ITER_PROCS, &it); 1177 while (!ret && (task = css_task_iter_next(&it))) { 1178 /* Avoid potential softlockup warning */ 1179 if ((++i & 1023) == 0) 1180 cond_resched(); 1181 ret = fn(task, arg); 1182 } 1183 css_task_iter_end(&it); 1184 if (ret) { 1185 mem_cgroup_iter_break(memcg, iter); 1186 break; 1187 } 1188 } 1189 } 1190 1191 #ifdef CONFIG_DEBUG_VM 1192 void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio) 1193 { 1194 struct mem_cgroup *memcg; 1195 1196 if (mem_cgroup_disabled()) 1197 return; 1198 1199 memcg = folio_memcg(folio); 1200 1201 if (!memcg) 1202 VM_BUG_ON_FOLIO(!mem_cgroup_is_root(lruvec_memcg(lruvec)), folio); 1203 else 1204 VM_BUG_ON_FOLIO(lruvec_memcg(lruvec) != memcg, folio); 1205 } 1206 #endif 1207 1208 /** 1209 * folio_lruvec_lock - Lock the lruvec for a folio. 1210 * @folio: Pointer to the folio. 1211 * 1212 * These functions are safe to use under any of the following conditions: 1213 * - folio locked 1214 * - folio_test_lru false 1215 * - folio frozen (refcount of 0) 1216 * 1217 * Return: The lruvec this folio is on with its lock held. 1218 */ 1219 struct lruvec *folio_lruvec_lock(struct folio *folio) 1220 { 1221 struct lruvec *lruvec = folio_lruvec(folio); 1222 1223 spin_lock(&lruvec->lru_lock); 1224 lruvec_memcg_debug(lruvec, folio); 1225 1226 return lruvec; 1227 } 1228 1229 /** 1230 * folio_lruvec_lock_irq - Lock the lruvec for a folio. 1231 * @folio: Pointer to the folio. 1232 * 1233 * These functions are safe to use under any of the following conditions: 1234 * - folio locked 1235 * - folio_test_lru false 1236 * - folio frozen (refcount of 0) 1237 * 1238 * Return: The lruvec this folio is on with its lock held and interrupts 1239 * disabled. 1240 */ 1241 struct lruvec *folio_lruvec_lock_irq(struct folio *folio) 1242 { 1243 struct lruvec *lruvec = folio_lruvec(folio); 1244 1245 spin_lock_irq(&lruvec->lru_lock); 1246 lruvec_memcg_debug(lruvec, folio); 1247 1248 return lruvec; 1249 } 1250 1251 /** 1252 * folio_lruvec_lock_irqsave - Lock the lruvec for a folio. 1253 * @folio: Pointer to the folio. 1254 * @flags: Pointer to irqsave flags. 1255 * 1256 * These functions are safe to use under any of the following conditions: 1257 * - folio locked 1258 * - folio_test_lru false 1259 * - folio frozen (refcount of 0) 1260 * 1261 * Return: The lruvec this folio is on with its lock held and interrupts 1262 * disabled. 1263 */ 1264 struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio, 1265 unsigned long *flags) 1266 { 1267 struct lruvec *lruvec = folio_lruvec(folio); 1268 1269 spin_lock_irqsave(&lruvec->lru_lock, *flags); 1270 lruvec_memcg_debug(lruvec, folio); 1271 1272 return lruvec; 1273 } 1274 1275 /** 1276 * mem_cgroup_update_lru_size - account for adding or removing an lru page 1277 * @lruvec: mem_cgroup per zone lru vector 1278 * @lru: index of lru list the page is sitting on 1279 * @zid: zone id of the accounted pages 1280 * @nr_pages: positive when adding or negative when removing 1281 * 1282 * This function must be called under lru_lock, just before a page is added 1283 * to or just after a page is removed from an lru list. 
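 *
 * A sketch of a typical caller (the lru list helpers pass the folio's zone
 * and page count; illustrative only):
 *
 *	mem_cgroup_update_lru_size(lruvec, lru, folio_zonenum(folio),
 *				   folio_nr_pages(folio));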
1284 */ 1285 void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru, 1286 int zid, int nr_pages) 1287 { 1288 struct mem_cgroup_per_node *mz; 1289 unsigned long *lru_size; 1290 long size; 1291 1292 if (mem_cgroup_disabled()) 1293 return; 1294 1295 mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec); 1296 lru_size = &mz->lru_zone_size[zid][lru]; 1297 1298 if (nr_pages < 0) 1299 *lru_size += nr_pages; 1300 1301 size = *lru_size; 1302 if (WARN_ONCE(size < 0, 1303 "%s(%p, %d, %d): lru_size %ld\n", 1304 __func__, lruvec, lru, nr_pages, size)) { 1305 VM_BUG_ON(1); 1306 *lru_size = 0; 1307 } 1308 1309 if (nr_pages > 0) 1310 *lru_size += nr_pages; 1311 } 1312 1313 /** 1314 * mem_cgroup_margin - calculate chargeable space of a memory cgroup 1315 * @memcg: the memory cgroup 1316 * 1317 * Returns the maximum amount of memory @mem can be charged with, in 1318 * pages. 1319 */ 1320 static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg) 1321 { 1322 unsigned long margin = 0; 1323 unsigned long count; 1324 unsigned long limit; 1325 1326 count = page_counter_read(&memcg->memory); 1327 limit = READ_ONCE(memcg->memory.max); 1328 if (count < limit) 1329 margin = limit - count; 1330 1331 if (do_memsw_account()) { 1332 count = page_counter_read(&memcg->memsw); 1333 limit = READ_ONCE(memcg->memsw.max); 1334 if (count < limit) 1335 margin = min(margin, limit - count); 1336 else 1337 margin = 0; 1338 } 1339 1340 return margin; 1341 } 1342 1343 struct memory_stat { 1344 const char *name; 1345 unsigned int idx; 1346 }; 1347 1348 static const struct memory_stat memory_stats[] = { 1349 { "anon", NR_ANON_MAPPED }, 1350 { "file", NR_FILE_PAGES }, 1351 { "kernel", MEMCG_KMEM }, 1352 { "kernel_stack", NR_KERNEL_STACK_KB }, 1353 { "pagetables", NR_PAGETABLE }, 1354 { "sec_pagetables", NR_SECONDARY_PAGETABLE }, 1355 { "percpu", MEMCG_PERCPU_B }, 1356 { "sock", MEMCG_SOCK }, 1357 { "vmalloc", MEMCG_VMALLOC }, 1358 { "shmem", NR_SHMEM }, 1359 #ifdef CONFIG_ZSWAP 1360 { "zswap", MEMCG_ZSWAP_B }, 1361 { "zswapped", MEMCG_ZSWAPPED }, 1362 #endif 1363 { "file_mapped", NR_FILE_MAPPED }, 1364 { "file_dirty", NR_FILE_DIRTY }, 1365 { "file_writeback", NR_WRITEBACK }, 1366 #ifdef CONFIG_SWAP 1367 { "swapcached", NR_SWAPCACHE }, 1368 #endif 1369 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 1370 { "anon_thp", NR_ANON_THPS }, 1371 { "file_thp", NR_FILE_THPS }, 1372 { "shmem_thp", NR_SHMEM_THPS }, 1373 #endif 1374 { "inactive_anon", NR_INACTIVE_ANON }, 1375 { "active_anon", NR_ACTIVE_ANON }, 1376 { "inactive_file", NR_INACTIVE_FILE }, 1377 { "active_file", NR_ACTIVE_FILE }, 1378 { "unevictable", NR_UNEVICTABLE }, 1379 { "slab_reclaimable", NR_SLAB_RECLAIMABLE_B }, 1380 { "slab_unreclaimable", NR_SLAB_UNRECLAIMABLE_B }, 1381 #ifdef CONFIG_HUGETLB_PAGE 1382 { "hugetlb", NR_HUGETLB }, 1383 #endif 1384 1385 /* The memory events */ 1386 { "workingset_refault_anon", WORKINGSET_REFAULT_ANON }, 1387 { "workingset_refault_file", WORKINGSET_REFAULT_FILE }, 1388 { "workingset_activate_anon", WORKINGSET_ACTIVATE_ANON }, 1389 { "workingset_activate_file", WORKINGSET_ACTIVATE_FILE }, 1390 { "workingset_restore_anon", WORKINGSET_RESTORE_ANON }, 1391 { "workingset_restore_file", WORKINGSET_RESTORE_FILE }, 1392 { "workingset_nodereclaim", WORKINGSET_NODERECLAIM }, 1393 1394 { "pgdemote_kswapd", PGDEMOTE_KSWAPD }, 1395 { "pgdemote_direct", PGDEMOTE_DIRECT }, 1396 { "pgdemote_khugepaged", PGDEMOTE_KHUGEPAGED }, 1397 #ifdef CONFIG_NUMA_BALANCING 1398 { "pgpromote_success", PGPROMOTE_SUCCESS }, 1399 #endif 1400 }; 1401 1402 /* The 
actual unit of the state item, not the same as the output unit */ 1403 static int memcg_page_state_unit(int item) 1404 { 1405 switch (item) { 1406 case MEMCG_PERCPU_B: 1407 case MEMCG_ZSWAP_B: 1408 case NR_SLAB_RECLAIMABLE_B: 1409 case NR_SLAB_UNRECLAIMABLE_B: 1410 return 1; 1411 case NR_KERNEL_STACK_KB: 1412 return SZ_1K; 1413 default: 1414 return PAGE_SIZE; 1415 } 1416 } 1417 1418 /* Translate stat items to the correct unit for memory.stat output */ 1419 static int memcg_page_state_output_unit(int item) 1420 { 1421 /* 1422 * Workingset state is actually in pages, but we export it to userspace 1423 * as a scalar count of events, so special case it here. 1424 * 1425 * Demotion and promotion activities are exported in pages, consistent 1426 * with their global counterparts. 1427 */ 1428 switch (item) { 1429 case WORKINGSET_REFAULT_ANON: 1430 case WORKINGSET_REFAULT_FILE: 1431 case WORKINGSET_ACTIVATE_ANON: 1432 case WORKINGSET_ACTIVATE_FILE: 1433 case WORKINGSET_RESTORE_ANON: 1434 case WORKINGSET_RESTORE_FILE: 1435 case WORKINGSET_NODERECLAIM: 1436 case PGDEMOTE_KSWAPD: 1437 case PGDEMOTE_DIRECT: 1438 case PGDEMOTE_KHUGEPAGED: 1439 #ifdef CONFIG_NUMA_BALANCING 1440 case PGPROMOTE_SUCCESS: 1441 #endif 1442 return 1; 1443 default: 1444 return memcg_page_state_unit(item); 1445 } 1446 } 1447 1448 unsigned long memcg_page_state_output(struct mem_cgroup *memcg, int item) 1449 { 1450 return memcg_page_state(memcg, item) * 1451 memcg_page_state_output_unit(item); 1452 } 1453 1454 #ifdef CONFIG_MEMCG_V1 1455 unsigned long memcg_page_state_local_output(struct mem_cgroup *memcg, int item) 1456 { 1457 return memcg_page_state_local(memcg, item) * 1458 memcg_page_state_output_unit(item); 1459 } 1460 #endif 1461 1462 #ifdef CONFIG_HUGETLB_PAGE 1463 static bool memcg_accounts_hugetlb(void) 1464 { 1465 return cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_HUGETLB_ACCOUNTING; 1466 } 1467 #else /* CONFIG_HUGETLB_PAGE */ 1468 static bool memcg_accounts_hugetlb(void) 1469 { 1470 return false; 1471 } 1472 #endif /* CONFIG_HUGETLB_PAGE */ 1473 1474 static void memcg_stat_format(struct mem_cgroup *memcg, struct seq_buf *s) 1475 { 1476 int i; 1477 1478 /* 1479 * Provide statistics on the state of the memory subsystem as 1480 * well as cumulative event counters that show past behavior. 
1481 * 1482 * This list is ordered following a combination of these gradients: 1483 * 1) generic big picture -> specifics and details 1484 * 2) reflecting userspace activity -> reflecting kernel heuristics 1485 * 1486 * Current memory state: 1487 */ 1488 mem_cgroup_flush_stats(memcg); 1489 1490 for (i = 0; i < ARRAY_SIZE(memory_stats); i++) { 1491 u64 size; 1492 1493 #ifdef CONFIG_HUGETLB_PAGE 1494 if (unlikely(memory_stats[i].idx == NR_HUGETLB) && 1495 !memcg_accounts_hugetlb()) 1496 continue; 1497 #endif 1498 size = memcg_page_state_output(memcg, memory_stats[i].idx); 1499 seq_buf_printf(s, "%s %llu\n", memory_stats[i].name, size); 1500 1501 if (unlikely(memory_stats[i].idx == NR_SLAB_UNRECLAIMABLE_B)) { 1502 size += memcg_page_state_output(memcg, 1503 NR_SLAB_RECLAIMABLE_B); 1504 seq_buf_printf(s, "slab %llu\n", size); 1505 } 1506 } 1507 1508 /* Accumulated memory events */ 1509 seq_buf_printf(s, "pgscan %lu\n", 1510 memcg_events(memcg, PGSCAN_KSWAPD) + 1511 memcg_events(memcg, PGSCAN_DIRECT) + 1512 memcg_events(memcg, PGSCAN_KHUGEPAGED)); 1513 seq_buf_printf(s, "pgsteal %lu\n", 1514 memcg_events(memcg, PGSTEAL_KSWAPD) + 1515 memcg_events(memcg, PGSTEAL_DIRECT) + 1516 memcg_events(memcg, PGSTEAL_KHUGEPAGED)); 1517 1518 for (i = 0; i < ARRAY_SIZE(memcg_vm_event_stat); i++) { 1519 #ifdef CONFIG_MEMCG_V1 1520 if (memcg_vm_event_stat[i] == PGPGIN || 1521 memcg_vm_event_stat[i] == PGPGOUT) 1522 continue; 1523 #endif 1524 seq_buf_printf(s, "%s %lu\n", 1525 vm_event_name(memcg_vm_event_stat[i]), 1526 memcg_events(memcg, memcg_vm_event_stat[i])); 1527 } 1528 } 1529 1530 static void memory_stat_format(struct mem_cgroup *memcg, struct seq_buf *s) 1531 { 1532 if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) 1533 memcg_stat_format(memcg, s); 1534 else 1535 memcg1_stat_format(memcg, s); 1536 if (seq_buf_has_overflowed(s)) 1537 pr_warn("%s: Warning, stat buffer overflow, please report\n", __func__); 1538 } 1539 1540 /** 1541 * mem_cgroup_print_oom_context: Print OOM information relevant to 1542 * memory controller. 1543 * @memcg: The memory cgroup that went over limit 1544 * @p: Task that is going to be killed 1545 * 1546 * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is 1547 * enabled 1548 */ 1549 void mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p) 1550 { 1551 rcu_read_lock(); 1552 1553 if (memcg) { 1554 pr_cont(",oom_memcg="); 1555 pr_cont_cgroup_path(memcg->css.cgroup); 1556 } else 1557 pr_cont(",global_oom"); 1558 if (p) { 1559 pr_cont(",task_memcg="); 1560 pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id)); 1561 } 1562 rcu_read_unlock(); 1563 } 1564 1565 /** 1566 * mem_cgroup_print_oom_meminfo: Print OOM memory information relevant to 1567 * memory controller. 1568 * @memcg: The memory cgroup that went over limit 1569 */ 1570 void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg) 1571 { 1572 /* Use static buffer, for the caller is holding oom_lock. 
*/ 1573 static char buf[SEQ_BUF_SIZE]; 1574 struct seq_buf s; 1575 1576 lockdep_assert_held(&oom_lock); 1577 1578 pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n", 1579 K((u64)page_counter_read(&memcg->memory)), 1580 K((u64)READ_ONCE(memcg->memory.max)), memcg->memory.failcnt); 1581 if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) 1582 pr_info("swap: usage %llukB, limit %llukB, failcnt %lu\n", 1583 K((u64)page_counter_read(&memcg->swap)), 1584 K((u64)READ_ONCE(memcg->swap.max)), memcg->swap.failcnt); 1585 #ifdef CONFIG_MEMCG_V1 1586 else { 1587 pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %lu\n", 1588 K((u64)page_counter_read(&memcg->memsw)), 1589 K((u64)memcg->memsw.max), memcg->memsw.failcnt); 1590 pr_info("kmem: usage %llukB, limit %llukB, failcnt %lu\n", 1591 K((u64)page_counter_read(&memcg->kmem)), 1592 K((u64)memcg->kmem.max), memcg->kmem.failcnt); 1593 } 1594 #endif 1595 1596 pr_info("Memory cgroup stats for "); 1597 pr_cont_cgroup_path(memcg->css.cgroup); 1598 pr_cont(":"); 1599 seq_buf_init(&s, buf, SEQ_BUF_SIZE); 1600 memory_stat_format(memcg, &s); 1601 seq_buf_do_printk(&s, KERN_INFO); 1602 } 1603 1604 /* 1605 * Return the memory (and swap, if configured) limit for a memcg. 1606 */ 1607 unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg) 1608 { 1609 unsigned long max = READ_ONCE(memcg->memory.max); 1610 1611 if (do_memsw_account()) { 1612 if (mem_cgroup_swappiness(memcg)) { 1613 /* Calculate swap excess capacity from memsw limit */ 1614 unsigned long swap = READ_ONCE(memcg->memsw.max) - max; 1615 1616 max += min(swap, (unsigned long)total_swap_pages); 1617 } 1618 } else { 1619 if (mem_cgroup_swappiness(memcg)) 1620 max += min(READ_ONCE(memcg->swap.max), 1621 (unsigned long)total_swap_pages); 1622 } 1623 return max; 1624 } 1625 1626 unsigned long mem_cgroup_size(struct mem_cgroup *memcg) 1627 { 1628 return page_counter_read(&memcg->memory); 1629 } 1630 1631 static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask, 1632 int order) 1633 { 1634 struct oom_control oc = { 1635 .zonelist = NULL, 1636 .nodemask = NULL, 1637 .memcg = memcg, 1638 .gfp_mask = gfp_mask, 1639 .order = order, 1640 }; 1641 bool ret = true; 1642 1643 if (mutex_lock_killable(&oom_lock)) 1644 return true; 1645 1646 if (mem_cgroup_margin(memcg) >= (1 << order)) 1647 goto unlock; 1648 1649 /* 1650 * A few threads which were not waiting at mutex_lock_killable() can 1651 * fail to bail out. Therefore, check again after holding oom_lock. 1652 */ 1653 ret = task_is_dying() || out_of_memory(&oc); 1654 1655 unlock: 1656 mutex_unlock(&oom_lock); 1657 return ret; 1658 } 1659 1660 /* 1661 * Returns true if successfully killed one or more processes. Though in some 1662 * corner cases it can return true even without killing any process. 
1663 */ 1664 static bool mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order) 1665 { 1666 bool locked, ret; 1667 1668 if (order > PAGE_ALLOC_COSTLY_ORDER) 1669 return false; 1670 1671 memcg_memory_event(memcg, MEMCG_OOM); 1672 1673 if (!memcg1_oom_prepare(memcg, &locked)) 1674 return false; 1675 1676 ret = mem_cgroup_out_of_memory(memcg, mask, order); 1677 1678 memcg1_oom_finish(memcg, locked); 1679 1680 return ret; 1681 } 1682 1683 /** 1684 * mem_cgroup_get_oom_group - get a memory cgroup to clean up after OOM 1685 * @victim: task to be killed by the OOM killer 1686 * @oom_domain: memcg in case of memcg OOM, NULL in case of system-wide OOM 1687 * 1688 * Returns a pointer to a memory cgroup, which has to be cleaned up 1689 * by killing all belonging OOM-killable tasks. 1690 * 1691 * Caller has to call mem_cgroup_put() on the returned non-NULL memcg. 1692 */ 1693 struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim, 1694 struct mem_cgroup *oom_domain) 1695 { 1696 struct mem_cgroup *oom_group = NULL; 1697 struct mem_cgroup *memcg; 1698 1699 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) 1700 return NULL; 1701 1702 if (!oom_domain) 1703 oom_domain = root_mem_cgroup; 1704 1705 rcu_read_lock(); 1706 1707 memcg = mem_cgroup_from_task(victim); 1708 if (mem_cgroup_is_root(memcg)) 1709 goto out; 1710 1711 /* 1712 * If the victim task has been asynchronously moved to a different 1713 * memory cgroup, we might end up killing tasks outside oom_domain. 1714 * In this case it's better to ignore memory.group.oom. 1715 */ 1716 if (unlikely(!mem_cgroup_is_descendant(memcg, oom_domain))) 1717 goto out; 1718 1719 /* 1720 * Traverse the memory cgroup hierarchy from the victim task's 1721 * cgroup up to the OOMing cgroup (or root) to find the 1722 * highest-level memory cgroup with oom.group set. 1723 */ 1724 for (; memcg; memcg = parent_mem_cgroup(memcg)) { 1725 if (READ_ONCE(memcg->oom_group)) 1726 oom_group = memcg; 1727 1728 if (memcg == oom_domain) 1729 break; 1730 } 1731 1732 if (oom_group) 1733 css_get(&oom_group->css); 1734 out: 1735 rcu_read_unlock(); 1736 1737 return oom_group; 1738 } 1739 1740 void mem_cgroup_print_oom_group(struct mem_cgroup *memcg) 1741 { 1742 pr_info("Tasks in "); 1743 pr_cont_cgroup_path(memcg->css.cgroup); 1744 pr_cont(" are going to be killed due to memory.oom.group set\n"); 1745 } 1746 1747 struct memcg_stock_pcp { 1748 local_lock_t stock_lock; 1749 struct mem_cgroup *cached; /* this never be root cgroup */ 1750 unsigned int nr_pages; 1751 1752 struct obj_cgroup *cached_objcg; 1753 struct pglist_data *cached_pgdat; 1754 unsigned int nr_bytes; 1755 int nr_slab_reclaimable_b; 1756 int nr_slab_unreclaimable_b; 1757 1758 struct work_struct work; 1759 unsigned long flags; 1760 #define FLUSHING_CACHED_CHARGE 0 1761 }; 1762 static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock) = { 1763 .stock_lock = INIT_LOCAL_LOCK(stock_lock), 1764 }; 1765 static DEFINE_MUTEX(percpu_charge_mutex); 1766 1767 static struct obj_cgroup *drain_obj_stock(struct memcg_stock_pcp *stock); 1768 static bool obj_stock_flush_required(struct memcg_stock_pcp *stock, 1769 struct mem_cgroup *root_memcg); 1770 1771 /** 1772 * consume_stock: Try to consume stocked charge on this cpu. 1773 * @memcg: memcg to consume from. 1774 * @nr_pages: how many pages to charge. 1775 * 1776 * The charges will only happen if @memcg matches the current cpu's memcg 1777 * stock, and at least @nr_pages are available in that stock. Failure to 1778 * service an allocation will refill the stock. 
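 *
 * Illustrative fast path in the charge code (a sketch, not the exact code):
 *
 *	if (consume_stock(memcg, nr_pages))
 *		return 0;	// charged entirely from the per-cpu stock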
1779 * 1780 * returns true if successful, false otherwise. 1781 */ 1782 static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages) 1783 { 1784 struct memcg_stock_pcp *stock; 1785 unsigned int stock_pages; 1786 unsigned long flags; 1787 bool ret = false; 1788 1789 if (nr_pages > MEMCG_CHARGE_BATCH) 1790 return ret; 1791 1792 local_lock_irqsave(&memcg_stock.stock_lock, flags); 1793 1794 stock = this_cpu_ptr(&memcg_stock); 1795 stock_pages = READ_ONCE(stock->nr_pages); 1796 if (memcg == READ_ONCE(stock->cached) && stock_pages >= nr_pages) { 1797 WRITE_ONCE(stock->nr_pages, stock_pages - nr_pages); 1798 ret = true; 1799 } 1800 1801 local_unlock_irqrestore(&memcg_stock.stock_lock, flags); 1802 1803 return ret; 1804 } 1805 1806 /* 1807 * Returns stocks cached in percpu and reset cached information. 1808 */ 1809 static void drain_stock(struct memcg_stock_pcp *stock) 1810 { 1811 unsigned int stock_pages = READ_ONCE(stock->nr_pages); 1812 struct mem_cgroup *old = READ_ONCE(stock->cached); 1813 1814 if (!old) 1815 return; 1816 1817 if (stock_pages) { 1818 page_counter_uncharge(&old->memory, stock_pages); 1819 if (do_memsw_account()) 1820 page_counter_uncharge(&old->memsw, stock_pages); 1821 1822 WRITE_ONCE(stock->nr_pages, 0); 1823 } 1824 1825 css_put(&old->css); 1826 WRITE_ONCE(stock->cached, NULL); 1827 } 1828 1829 static void drain_local_stock(struct work_struct *dummy) 1830 { 1831 struct memcg_stock_pcp *stock; 1832 struct obj_cgroup *old = NULL; 1833 unsigned long flags; 1834 1835 /* 1836 * The only protection from cpu hotplug (memcg_hotplug_cpu_dead) vs. 1837 * drain_stock races is that we always operate on local CPU stock 1838 * here with IRQ disabled 1839 */ 1840 local_lock_irqsave(&memcg_stock.stock_lock, flags); 1841 1842 stock = this_cpu_ptr(&memcg_stock); 1843 old = drain_obj_stock(stock); 1844 drain_stock(stock); 1845 clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags); 1846 1847 local_unlock_irqrestore(&memcg_stock.stock_lock, flags); 1848 obj_cgroup_put(old); 1849 } 1850 1851 /* 1852 * Cache charges(val) to local per_cpu area. 1853 * This will be consumed by consume_stock() function, later. 1854 */ 1855 static void __refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages) 1856 { 1857 struct memcg_stock_pcp *stock; 1858 unsigned int stock_pages; 1859 1860 stock = this_cpu_ptr(&memcg_stock); 1861 if (READ_ONCE(stock->cached) != memcg) { /* reset if necessary */ 1862 drain_stock(stock); 1863 css_get(&memcg->css); 1864 WRITE_ONCE(stock->cached, memcg); 1865 } 1866 stock_pages = READ_ONCE(stock->nr_pages) + nr_pages; 1867 WRITE_ONCE(stock->nr_pages, stock_pages); 1868 1869 if (stock_pages > MEMCG_CHARGE_BATCH) 1870 drain_stock(stock); 1871 } 1872 1873 static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages) 1874 { 1875 unsigned long flags; 1876 1877 local_lock_irqsave(&memcg_stock.stock_lock, flags); 1878 __refill_stock(memcg, nr_pages); 1879 local_unlock_irqrestore(&memcg_stock.stock_lock, flags); 1880 } 1881 1882 /* 1883 * Drains all per-CPU charge caches for given root_memcg resp. subtree 1884 * of the hierarchy under it. 1885 */ 1886 void drain_all_stock(struct mem_cgroup *root_memcg) 1887 { 1888 int cpu, curcpu; 1889 1890 /* If someone's already draining, avoid adding running more workers. 
*/ 1891 if (!mutex_trylock(&percpu_charge_mutex)) 1892 return; 1893 /* 1894 * Notify other cpus that system-wide "drain" is running 1895 * We do not care about races with the cpu hotplug because cpu down 1896 * as well as workers from this path always operate on the local 1897 * per-cpu data. CPU up doesn't touch memcg_stock at all. 1898 */ 1899 migrate_disable(); 1900 curcpu = smp_processor_id(); 1901 for_each_online_cpu(cpu) { 1902 struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu); 1903 struct mem_cgroup *memcg; 1904 bool flush = false; 1905 1906 rcu_read_lock(); 1907 memcg = READ_ONCE(stock->cached); 1908 if (memcg && READ_ONCE(stock->nr_pages) && 1909 mem_cgroup_is_descendant(memcg, root_memcg)) 1910 flush = true; 1911 else if (obj_stock_flush_required(stock, root_memcg)) 1912 flush = true; 1913 rcu_read_unlock(); 1914 1915 if (flush && 1916 !test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) { 1917 if (cpu == curcpu) 1918 drain_local_stock(&stock->work); 1919 else if (!cpu_is_isolated(cpu)) 1920 schedule_work_on(cpu, &stock->work); 1921 } 1922 } 1923 migrate_enable(); 1924 mutex_unlock(&percpu_charge_mutex); 1925 } 1926 1927 static int memcg_hotplug_cpu_dead(unsigned int cpu) 1928 { 1929 struct memcg_stock_pcp *stock; 1930 struct obj_cgroup *old; 1931 unsigned long flags; 1932 1933 stock = &per_cpu(memcg_stock, cpu); 1934 1935 /* drain_obj_stock requires stock_lock */ 1936 local_lock_irqsave(&memcg_stock.stock_lock, flags); 1937 old = drain_obj_stock(stock); 1938 local_unlock_irqrestore(&memcg_stock.stock_lock, flags); 1939 1940 drain_stock(stock); 1941 obj_cgroup_put(old); 1942 1943 return 0; 1944 } 1945 1946 static unsigned long reclaim_high(struct mem_cgroup *memcg, 1947 unsigned int nr_pages, 1948 gfp_t gfp_mask) 1949 { 1950 unsigned long nr_reclaimed = 0; 1951 1952 do { 1953 unsigned long pflags; 1954 1955 if (page_counter_read(&memcg->memory) <= 1956 READ_ONCE(memcg->memory.high)) 1957 continue; 1958 1959 memcg_memory_event(memcg, MEMCG_HIGH); 1960 1961 psi_memstall_enter(&pflags); 1962 nr_reclaimed += try_to_free_mem_cgroup_pages(memcg, nr_pages, 1963 gfp_mask, 1964 MEMCG_RECLAIM_MAY_SWAP, 1965 NULL); 1966 psi_memstall_leave(&pflags); 1967 } while ((memcg = parent_mem_cgroup(memcg)) && 1968 !mem_cgroup_is_root(memcg)); 1969 1970 return nr_reclaimed; 1971 } 1972 1973 static void high_work_func(struct work_struct *work) 1974 { 1975 struct mem_cgroup *memcg; 1976 1977 memcg = container_of(work, struct mem_cgroup, high_work); 1978 reclaim_high(memcg, MEMCG_CHARGE_BATCH, GFP_KERNEL); 1979 } 1980 1981 /* 1982 * Clamp the maximum sleep time per allocation batch to 2 seconds. This is 1983 * enough to still cause a significant slowdown in most cases, while still 1984 * allowing diagnostics and tracing to proceed without becoming stuck. 1985 */ 1986 #define MEMCG_MAX_HIGH_DELAY_JIFFIES (2UL*HZ) 1987 1988 /* 1989 * When calculating the delay, we use these either side of the exponentiation to 1990 * maintain precision and scale to a reasonable number of jiffies (see the table 1991 * below. 1992 * 1993 * - MEMCG_DELAY_PRECISION_SHIFT: Extra precision bits while translating the 1994 * overage ratio to a delay. 1995 * - MEMCG_DELAY_SCALING_SHIFT: The number of bits to scale down the 1996 * proposed penalty in order to reduce to a reasonable number of jiffies, and 1997 * to produce a reasonable delay curve. 
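 *
 * As a rough worked example (an illustration, assuming HZ == 1000): at 5%
 * above the high limit, calculate_overage() below yields
 * (0.05 << MEMCG_DELAY_PRECISION_SHIFT) ~= 52429, and calculate_high_delay()
 * turns that into 52429 * 52429 * 1000 >> 20 >> 14 ~= 160 jiffies, i.e.
 * roughly the 159ms row for 105M in the table below (per full
 * MEMCG_CHARGE_BATCH worth of pages).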
1998 * 1999 * MEMCG_DELAY_SCALING_SHIFT just happens to be a number that produces a 2000 * reasonable delay curve compared to precision-adjusted overage, not 2001 * penalising heavily at first, but still making sure that growth beyond the 2002 * limit penalises misbehaviour cgroups by slowing them down exponentially. For 2003 * example, with a high of 100 megabytes: 2004 * 2005 * +-------+------------------------+ 2006 * | usage | time to allocate in ms | 2007 * +-------+------------------------+ 2008 * | 100M | 0 | 2009 * | 101M | 6 | 2010 * | 102M | 25 | 2011 * | 103M | 57 | 2012 * | 104M | 102 | 2013 * | 105M | 159 | 2014 * | 106M | 230 | 2015 * | 107M | 313 | 2016 * | 108M | 409 | 2017 * | 109M | 518 | 2018 * | 110M | 639 | 2019 * | 111M | 774 | 2020 * | 112M | 921 | 2021 * | 113M | 1081 | 2022 * | 114M | 1254 | 2023 * | 115M | 1439 | 2024 * | 116M | 1638 | 2025 * | 117M | 1849 | 2026 * | 118M | 2000 | 2027 * | 119M | 2000 | 2028 * | 120M | 2000 | 2029 * +-------+------------------------+ 2030 */ 2031 #define MEMCG_DELAY_PRECISION_SHIFT 20 2032 #define MEMCG_DELAY_SCALING_SHIFT 14 2033 2034 static u64 calculate_overage(unsigned long usage, unsigned long high) 2035 { 2036 u64 overage; 2037 2038 if (usage <= high) 2039 return 0; 2040 2041 /* 2042 * Prevent division by 0 in overage calculation by acting as if 2043 * it was a threshold of 1 page 2044 */ 2045 high = max(high, 1UL); 2046 2047 overage = usage - high; 2048 overage <<= MEMCG_DELAY_PRECISION_SHIFT; 2049 return div64_u64(overage, high); 2050 } 2051 2052 static u64 mem_find_max_overage(struct mem_cgroup *memcg) 2053 { 2054 u64 overage, max_overage = 0; 2055 2056 do { 2057 overage = calculate_overage(page_counter_read(&memcg->memory), 2058 READ_ONCE(memcg->memory.high)); 2059 max_overage = max(overage, max_overage); 2060 } while ((memcg = parent_mem_cgroup(memcg)) && 2061 !mem_cgroup_is_root(memcg)); 2062 2063 return max_overage; 2064 } 2065 2066 static u64 swap_find_max_overage(struct mem_cgroup *memcg) 2067 { 2068 u64 overage, max_overage = 0; 2069 2070 do { 2071 overage = calculate_overage(page_counter_read(&memcg->swap), 2072 READ_ONCE(memcg->swap.high)); 2073 if (overage) 2074 memcg_memory_event(memcg, MEMCG_SWAP_HIGH); 2075 max_overage = max(overage, max_overage); 2076 } while ((memcg = parent_mem_cgroup(memcg)) && 2077 !mem_cgroup_is_root(memcg)); 2078 2079 return max_overage; 2080 } 2081 2082 /* 2083 * Get the number of jiffies that we should penalise a mischievous cgroup which 2084 * is exceeding its memory.high by checking both it and its ancestors. 2085 */ 2086 static unsigned long calculate_high_delay(struct mem_cgroup *memcg, 2087 unsigned int nr_pages, 2088 u64 max_overage) 2089 { 2090 unsigned long penalty_jiffies; 2091 2092 if (!max_overage) 2093 return 0; 2094 2095 /* 2096 * We use overage compared to memory.high to calculate the number of 2097 * jiffies to sleep (penalty_jiffies). Ideally this value should be 2098 * fairly lenient on small overages, and increasingly harsh when the 2099 * memcg in question makes it clear that it has no intention of stopping 2100 * its crazy behaviour, so we exponentially increase the delay based on 2101 * overage amount. 2102 */ 2103 penalty_jiffies = max_overage * max_overage * HZ; 2104 penalty_jiffies >>= MEMCG_DELAY_PRECISION_SHIFT; 2105 penalty_jiffies >>= MEMCG_DELAY_SCALING_SHIFT; 2106 2107 /* 2108 * Factor in the task's own contribution to the overage, such that four 2109 * N-sized allocations are throttled approximately the same as one 2110 * 4N-sized allocation. 
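 * For example, a task whose charge was only a quarter of the nominal
 * batch is put to sleep for only a quarter of the penalty computed
 * above.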
2111 * 2112 * MEMCG_CHARGE_BATCH pages is nominal, so work out how much smaller or 2113 * larger the current charge patch is than that. 2114 */ 2115 return penalty_jiffies * nr_pages / MEMCG_CHARGE_BATCH; 2116 } 2117 2118 /* 2119 * Reclaims memory over the high limit. Called directly from 2120 * try_charge() (context permitting), as well as from the userland 2121 * return path where reclaim is always able to block. 2122 */ 2123 void mem_cgroup_handle_over_high(gfp_t gfp_mask) 2124 { 2125 unsigned long penalty_jiffies; 2126 unsigned long pflags; 2127 unsigned long nr_reclaimed; 2128 unsigned int nr_pages = current->memcg_nr_pages_over_high; 2129 int nr_retries = MAX_RECLAIM_RETRIES; 2130 struct mem_cgroup *memcg; 2131 bool in_retry = false; 2132 2133 if (likely(!nr_pages)) 2134 return; 2135 2136 memcg = get_mem_cgroup_from_mm(current->mm); 2137 current->memcg_nr_pages_over_high = 0; 2138 2139 retry_reclaim: 2140 /* 2141 * Bail if the task is already exiting. Unlike memory.max, 2142 * memory.high enforcement isn't as strict, and there is no 2143 * OOM killer involved, which means the excess could already 2144 * be much bigger (and still growing) than it could for 2145 * memory.max; the dying task could get stuck in fruitless 2146 * reclaim for a long time, which isn't desirable. 2147 */ 2148 if (task_is_dying()) 2149 goto out; 2150 2151 /* 2152 * The allocating task should reclaim at least the batch size, but for 2153 * subsequent retries we only want to do what's necessary to prevent oom 2154 * or breaching resource isolation. 2155 * 2156 * This is distinct from memory.max or page allocator behaviour because 2157 * memory.high is currently batched, whereas memory.max and the page 2158 * allocator run every time an allocation is made. 2159 */ 2160 nr_reclaimed = reclaim_high(memcg, 2161 in_retry ? SWAP_CLUSTER_MAX : nr_pages, 2162 gfp_mask); 2163 2164 /* 2165 * memory.high is breached and reclaim is unable to keep up. Throttle 2166 * allocators proactively to slow down excessive growth. 2167 */ 2168 penalty_jiffies = calculate_high_delay(memcg, nr_pages, 2169 mem_find_max_overage(memcg)); 2170 2171 penalty_jiffies += calculate_high_delay(memcg, nr_pages, 2172 swap_find_max_overage(memcg)); 2173 2174 /* 2175 * Clamp the max delay per usermode return so as to still keep the 2176 * application moving forwards and also permit diagnostics, albeit 2177 * extremely slowly. 2178 */ 2179 penalty_jiffies = min(penalty_jiffies, MEMCG_MAX_HIGH_DELAY_JIFFIES); 2180 2181 /* 2182 * Don't sleep if the amount of jiffies this memcg owes us is so low 2183 * that it's not even worth doing, in an attempt to be nice to those who 2184 * go only a small amount over their memory.high value and maybe haven't 2185 * been aggressively reclaimed enough yet. 2186 */ 2187 if (penalty_jiffies <= HZ / 100) 2188 goto out; 2189 2190 /* 2191 * If reclaim is making forward progress but we're still over 2192 * memory.high, we want to encourage that rather than doing allocator 2193 * throttling. 2194 */ 2195 if (nr_reclaimed || nr_retries--) { 2196 in_retry = true; 2197 goto retry_reclaim; 2198 } 2199 2200 /* 2201 * Reclaim didn't manage to push usage below the limit, slow 2202 * this allocating task down. 2203 * 2204 * If we exit early, we're guaranteed to die (since 2205 * schedule_timeout_killable sets TASK_KILLABLE). This means we don't 2206 * need to account for any ill-begotten jiffies to pay them off later. 
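 *
 * The sleep below is bracketed by psi_memstall_enter()/psi_memstall_leave(),
 * so the time spent throttled here is reported as memory pressure stall
 * time rather than disappearing silently.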
2207 */ 2208 psi_memstall_enter(&pflags); 2209 schedule_timeout_killable(penalty_jiffies); 2210 psi_memstall_leave(&pflags); 2211 2212 out: 2213 css_put(&memcg->css); 2214 } 2215 2216 static int try_charge_memcg(struct mem_cgroup *memcg, gfp_t gfp_mask, 2217 unsigned int nr_pages) 2218 { 2219 unsigned int batch = max(MEMCG_CHARGE_BATCH, nr_pages); 2220 int nr_retries = MAX_RECLAIM_RETRIES; 2221 struct mem_cgroup *mem_over_limit; 2222 struct page_counter *counter; 2223 unsigned long nr_reclaimed; 2224 bool passed_oom = false; 2225 unsigned int reclaim_options = MEMCG_RECLAIM_MAY_SWAP; 2226 bool drained = false; 2227 bool raised_max_event = false; 2228 unsigned long pflags; 2229 2230 retry: 2231 if (consume_stock(memcg, nr_pages)) 2232 return 0; 2233 2234 if (!do_memsw_account() || 2235 page_counter_try_charge(&memcg->memsw, batch, &counter)) { 2236 if (page_counter_try_charge(&memcg->memory, batch, &counter)) 2237 goto done_restock; 2238 if (do_memsw_account()) 2239 page_counter_uncharge(&memcg->memsw, batch); 2240 mem_over_limit = mem_cgroup_from_counter(counter, memory); 2241 } else { 2242 mem_over_limit = mem_cgroup_from_counter(counter, memsw); 2243 reclaim_options &= ~MEMCG_RECLAIM_MAY_SWAP; 2244 } 2245 2246 if (batch > nr_pages) { 2247 batch = nr_pages; 2248 goto retry; 2249 } 2250 2251 /* 2252 * Prevent unbounded recursion when reclaim operations need to 2253 * allocate memory. This might exceed the limits temporarily, 2254 * but we prefer facilitating memory reclaim and getting back 2255 * under the limit over triggering OOM kills in these cases. 2256 */ 2257 if (unlikely(current->flags & PF_MEMALLOC)) 2258 goto force; 2259 2260 if (unlikely(task_in_memcg_oom(current))) 2261 goto nomem; 2262 2263 if (!gfpflags_allow_blocking(gfp_mask)) 2264 goto nomem; 2265 2266 memcg_memory_event(mem_over_limit, MEMCG_MAX); 2267 raised_max_event = true; 2268 2269 psi_memstall_enter(&pflags); 2270 nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages, 2271 gfp_mask, reclaim_options, NULL); 2272 psi_memstall_leave(&pflags); 2273 2274 if (mem_cgroup_margin(mem_over_limit) >= nr_pages) 2275 goto retry; 2276 2277 if (!drained) { 2278 drain_all_stock(mem_over_limit); 2279 drained = true; 2280 goto retry; 2281 } 2282 2283 if (gfp_mask & __GFP_NORETRY) 2284 goto nomem; 2285 /* 2286 * Even though the limit is exceeded at this point, reclaim 2287 * may have been able to free some pages. Retry the charge 2288 * before killing the task. 2289 * 2290 * Only for regular pages, though: huge pages are rather 2291 * unlikely to succeed so close to the limit, and we fall back 2292 * to regular pages anyway in case of failure. 2293 */ 2294 if (nr_reclaimed && nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER)) 2295 goto retry; 2296 2297 if (nr_retries--) 2298 goto retry; 2299 2300 if (gfp_mask & __GFP_RETRY_MAYFAIL) 2301 goto nomem; 2302 2303 /* Avoid endless loop for tasks bypassed by the oom killer */ 2304 if (passed_oom && task_is_dying()) 2305 goto nomem; 2306 2307 /* 2308 * keep retrying as long as the memcg oom killer is able to make 2309 * a forward progress or bypass the charge if the oom killer 2310 * couldn't make any progress. 2311 */ 2312 if (mem_cgroup_oom(mem_over_limit, gfp_mask, 2313 get_order(nr_pages * PAGE_SIZE))) { 2314 passed_oom = true; 2315 nr_retries = MAX_RECLAIM_RETRIES; 2316 goto retry; 2317 } 2318 nomem: 2319 /* 2320 * Memcg doesn't have a dedicated reserve for atomic 2321 * allocations. 
But like the global atomic pool, we need to 2322 * put the burden of reclaim on regular allocation requests 2323 * and let these go through as privileged allocations. 2324 */ 2325 if (!(gfp_mask & (__GFP_NOFAIL | __GFP_HIGH))) 2326 return -ENOMEM; 2327 force: 2328 /* 2329 * If the allocation has to be enforced, don't forget to raise 2330 * a MEMCG_MAX event. 2331 */ 2332 if (!raised_max_event) 2333 memcg_memory_event(mem_over_limit, MEMCG_MAX); 2334 2335 /* 2336 * The allocation either can't fail or will lead to more memory 2337 * being freed very soon. Allow memory usage go over the limit 2338 * temporarily by force charging it. 2339 */ 2340 page_counter_charge(&memcg->memory, nr_pages); 2341 if (do_memsw_account()) 2342 page_counter_charge(&memcg->memsw, nr_pages); 2343 2344 return 0; 2345 2346 done_restock: 2347 if (batch > nr_pages) 2348 refill_stock(memcg, batch - nr_pages); 2349 2350 /* 2351 * If the hierarchy is above the normal consumption range, schedule 2352 * reclaim on returning to userland. We can perform reclaim here 2353 * if __GFP_RECLAIM but let's always punt for simplicity and so that 2354 * GFP_KERNEL can consistently be used during reclaim. @memcg is 2355 * not recorded as it most likely matches current's and won't 2356 * change in the meantime. As high limit is checked again before 2357 * reclaim, the cost of mismatch is negligible. 2358 */ 2359 do { 2360 bool mem_high, swap_high; 2361 2362 mem_high = page_counter_read(&memcg->memory) > 2363 READ_ONCE(memcg->memory.high); 2364 swap_high = page_counter_read(&memcg->swap) > 2365 READ_ONCE(memcg->swap.high); 2366 2367 /* Don't bother a random interrupted task */ 2368 if (!in_task()) { 2369 if (mem_high) { 2370 schedule_work(&memcg->high_work); 2371 break; 2372 } 2373 continue; 2374 } 2375 2376 if (mem_high || swap_high) { 2377 /* 2378 * The allocating tasks in this cgroup will need to do 2379 * reclaim or be throttled to prevent further growth 2380 * of the memory or swap footprints. 2381 * 2382 * Target some best-effort fairness between the tasks, 2383 * and distribute reclaim work and delay penalties 2384 * based on how much each task is actually allocating. 2385 */ 2386 current->memcg_nr_pages_over_high += batch; 2387 set_notify_resume(current); 2388 break; 2389 } 2390 } while ((memcg = parent_mem_cgroup(memcg))); 2391 2392 /* 2393 * Reclaim is set up above to be called from the userland 2394 * return path. But also attempt synchronous reclaim to avoid 2395 * excessive overrun while the task is still inside the 2396 * kernel. If this is successful, the return path will see it 2397 * when it rechecks the overage and simply bail out. 
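 *
 * Note that the synchronous attempt below is only made once the accrued
 * overage exceeds a full charge batch and the context is allowed to
 * block; smaller overages are left to the return-to-userland path alone.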
2398 */ 2399 if (current->memcg_nr_pages_over_high > MEMCG_CHARGE_BATCH && 2400 !(current->flags & PF_MEMALLOC) && 2401 gfpflags_allow_blocking(gfp_mask)) 2402 mem_cgroup_handle_over_high(gfp_mask); 2403 return 0; 2404 } 2405 2406 static inline int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask, 2407 unsigned int nr_pages) 2408 { 2409 if (mem_cgroup_is_root(memcg)) 2410 return 0; 2411 2412 return try_charge_memcg(memcg, gfp_mask, nr_pages); 2413 } 2414 2415 static void commit_charge(struct folio *folio, struct mem_cgroup *memcg) 2416 { 2417 VM_BUG_ON_FOLIO(folio_memcg_charged(folio), folio); 2418 /* 2419 * Any of the following ensures page's memcg stability: 2420 * 2421 * - the page lock 2422 * - LRU isolation 2423 * - exclusive reference 2424 */ 2425 folio->memcg_data = (unsigned long)memcg; 2426 } 2427 2428 static inline void __mod_objcg_mlstate(struct obj_cgroup *objcg, 2429 struct pglist_data *pgdat, 2430 enum node_stat_item idx, int nr) 2431 { 2432 struct mem_cgroup *memcg; 2433 struct lruvec *lruvec; 2434 2435 rcu_read_lock(); 2436 memcg = obj_cgroup_memcg(objcg); 2437 lruvec = mem_cgroup_lruvec(memcg, pgdat); 2438 __mod_memcg_lruvec_state(lruvec, idx, nr); 2439 rcu_read_unlock(); 2440 } 2441 2442 static __always_inline 2443 struct mem_cgroup *mem_cgroup_from_obj_folio(struct folio *folio, void *p) 2444 { 2445 /* 2446 * Slab objects are accounted individually, not per-page. 2447 * Memcg membership data for each individual object is saved in 2448 * slab->obj_exts. 2449 */ 2450 if (folio_test_slab(folio)) { 2451 struct slabobj_ext *obj_exts; 2452 struct slab *slab; 2453 unsigned int off; 2454 2455 slab = folio_slab(folio); 2456 obj_exts = slab_obj_exts(slab); 2457 if (!obj_exts) 2458 return NULL; 2459 2460 off = obj_to_index(slab->slab_cache, slab, p); 2461 if (obj_exts[off].objcg) 2462 return obj_cgroup_memcg(obj_exts[off].objcg); 2463 2464 return NULL; 2465 } 2466 2467 /* 2468 * folio_memcg_check() is used here, because in theory we can encounter 2469 * a folio where the slab flag has been cleared already, but 2470 * slab->obj_exts has not been freed yet 2471 * folio_memcg_check() will guarantee that a proper memory 2472 * cgroup pointer or NULL will be returned. 2473 */ 2474 return folio_memcg_check(folio); 2475 } 2476 2477 /* 2478 * Returns a pointer to the memory cgroup to which the kernel object is charged. 2479 * It is not suitable for objects allocated using vmalloc(). 2480 * 2481 * A passed kernel object must be a slab object or a generic kernel page. 2482 * 2483 * The caller must ensure the memcg lifetime, e.g. by taking rcu_read_lock(), 2484 * cgroup_mutex, etc. 2485 */ 2486 struct mem_cgroup *mem_cgroup_from_slab_obj(void *p) 2487 { 2488 if (mem_cgroup_disabled()) 2489 return NULL; 2490 2491 return mem_cgroup_from_obj_folio(virt_to_folio(p), p); 2492 } 2493 2494 static struct obj_cgroup *__get_obj_cgroup_from_memcg(struct mem_cgroup *memcg) 2495 { 2496 struct obj_cgroup *objcg = NULL; 2497 2498 for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg)) { 2499 objcg = rcu_dereference(memcg->objcg); 2500 if (likely(objcg && obj_cgroup_tryget(objcg))) 2501 break; 2502 objcg = NULL; 2503 } 2504 return objcg; 2505 } 2506 2507 static struct obj_cgroup *current_objcg_update(void) 2508 { 2509 struct mem_cgroup *memcg; 2510 struct obj_cgroup *old, *objcg = NULL; 2511 2512 do { 2513 /* Atomically drop the update bit. 
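 * The flag lives in the lowest bit of current->objcg. Real objcg
 * pointers are at least word-aligned, so the bit is never set in a
 * valid pointer, and a single xchg() both clears the flag and fetches
 * the previous pointer.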
*/ 2514 old = xchg(&current->objcg, NULL); 2515 if (old) { 2516 old = (struct obj_cgroup *) 2517 ((unsigned long)old & ~CURRENT_OBJCG_UPDATE_FLAG); 2518 obj_cgroup_put(old); 2519 2520 old = NULL; 2521 } 2522 2523 /* If new objcg is NULL, no reason for the second atomic update. */ 2524 if (!current->mm || (current->flags & PF_KTHREAD)) 2525 return NULL; 2526 2527 /* 2528 * Release the objcg pointer from the previous iteration, 2529 * if try_cmpxchg() below fails. 2530 */ 2531 if (unlikely(objcg)) { 2532 obj_cgroup_put(objcg); 2533 objcg = NULL; 2534 } 2535 2536 /* 2537 * Obtain the new objcg pointer. The current task can be 2538 * asynchronously moved to another memcg and the previous 2539 * memcg can be offlined. So let's get the memcg pointer 2540 * and try to get a reference to the objcg under an rcu read lock. 2541 */ 2542 2543 rcu_read_lock(); 2544 memcg = mem_cgroup_from_task(current); 2545 objcg = __get_obj_cgroup_from_memcg(memcg); 2546 rcu_read_unlock(); 2547 2548 /* 2549 * Try to set up a new objcg pointer atomically. If it 2550 * fails, it means the update flag was set concurrently, so 2551 * the whole procedure should be repeated. 2552 */ 2553 } while (!try_cmpxchg(&current->objcg, &old, objcg)); 2554 2555 return objcg; 2556 } 2557 2558 __always_inline struct obj_cgroup *current_obj_cgroup(void) 2559 { 2560 struct mem_cgroup *memcg; 2561 struct obj_cgroup *objcg; 2562 2563 if (in_task()) { 2564 memcg = current->active_memcg; 2565 if (unlikely(memcg)) 2566 goto from_memcg; 2567 2568 objcg = READ_ONCE(current->objcg); 2569 if (unlikely((unsigned long)objcg & CURRENT_OBJCG_UPDATE_FLAG)) 2570 objcg = current_objcg_update(); 2571 /* 2572 * Objcg reference is kept by the task, so it's safe 2573 * for the current task to use the objcg. 2574 */ 2575 return objcg; 2576 } 2577 2578 memcg = this_cpu_read(int_active_memcg); 2579 if (unlikely(memcg)) 2580 goto from_memcg; 2581 2582 return NULL; 2583 2584 from_memcg: 2585 objcg = NULL; 2586 for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg)) { 2587 /* 2588 * Memcg pointer is protected by scope (see set_active_memcg()) 2589 * and is pinning the corresponding objcg, so objcg can't go 2590 * away and can be used within the scope without any additional 2591 * protection.
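 *
 * A minimal sketch of such a scope (illustrative only, not code from
 * this file):
 *
 *	old = set_active_memcg(memcg);
 *	p = kmalloc(size, GFP_KERNEL_ACCOUNT);	// charged to memcg
 *	set_active_memcg(old);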
2592 */ 2593 objcg = rcu_dereference_check(memcg->objcg, 1); 2594 if (likely(objcg)) 2595 break; 2596 } 2597 2598 return objcg; 2599 } 2600 2601 struct obj_cgroup *get_obj_cgroup_from_folio(struct folio *folio) 2602 { 2603 struct obj_cgroup *objcg; 2604 2605 if (!memcg_kmem_online()) 2606 return NULL; 2607 2608 if (folio_memcg_kmem(folio)) { 2609 objcg = __folio_objcg(folio); 2610 obj_cgroup_get(objcg); 2611 } else { 2612 struct mem_cgroup *memcg; 2613 2614 rcu_read_lock(); 2615 memcg = __folio_memcg(folio); 2616 if (memcg) 2617 objcg = __get_obj_cgroup_from_memcg(memcg); 2618 else 2619 objcg = NULL; 2620 rcu_read_unlock(); 2621 } 2622 return objcg; 2623 } 2624 2625 /* 2626 * obj_cgroup_uncharge_pages: uncharge a number of kernel pages from a objcg 2627 * @objcg: object cgroup to uncharge 2628 * @nr_pages: number of pages to uncharge 2629 */ 2630 static void obj_cgroup_uncharge_pages(struct obj_cgroup *objcg, 2631 unsigned int nr_pages) 2632 { 2633 struct mem_cgroup *memcg; 2634 2635 memcg = get_mem_cgroup_from_objcg(objcg); 2636 2637 mod_memcg_state(memcg, MEMCG_KMEM, -nr_pages); 2638 memcg1_account_kmem(memcg, -nr_pages); 2639 refill_stock(memcg, nr_pages); 2640 2641 css_put(&memcg->css); 2642 } 2643 2644 /* 2645 * obj_cgroup_charge_pages: charge a number of kernel pages to a objcg 2646 * @objcg: object cgroup to charge 2647 * @gfp: reclaim mode 2648 * @nr_pages: number of pages to charge 2649 * 2650 * Returns 0 on success, an error code on failure. 2651 */ 2652 static int obj_cgroup_charge_pages(struct obj_cgroup *objcg, gfp_t gfp, 2653 unsigned int nr_pages) 2654 { 2655 struct mem_cgroup *memcg; 2656 int ret; 2657 2658 memcg = get_mem_cgroup_from_objcg(objcg); 2659 2660 ret = try_charge_memcg(memcg, gfp, nr_pages); 2661 if (ret) 2662 goto out; 2663 2664 mod_memcg_state(memcg, MEMCG_KMEM, nr_pages); 2665 memcg1_account_kmem(memcg, nr_pages); 2666 out: 2667 css_put(&memcg->css); 2668 2669 return ret; 2670 } 2671 2672 /** 2673 * __memcg_kmem_charge_page: charge a kmem page to the current memory cgroup 2674 * @page: page to charge 2675 * @gfp: reclaim mode 2676 * @order: allocation order 2677 * 2678 * Returns 0 on success, an error code on failure. 2679 */ 2680 int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order) 2681 { 2682 struct obj_cgroup *objcg; 2683 int ret = 0; 2684 2685 objcg = current_obj_cgroup(); 2686 if (objcg) { 2687 ret = obj_cgroup_charge_pages(objcg, gfp, 1 << order); 2688 if (!ret) { 2689 obj_cgroup_get(objcg); 2690 page->memcg_data = (unsigned long)objcg | 2691 MEMCG_DATA_KMEM; 2692 return 0; 2693 } 2694 } 2695 return ret; 2696 } 2697 2698 /** 2699 * __memcg_kmem_uncharge_page: uncharge a kmem page 2700 * @page: page to uncharge 2701 * @order: allocation order 2702 */ 2703 void __memcg_kmem_uncharge_page(struct page *page, int order) 2704 { 2705 struct folio *folio = page_folio(page); 2706 struct obj_cgroup *objcg; 2707 unsigned int nr_pages = 1 << order; 2708 2709 if (!folio_memcg_kmem(folio)) 2710 return; 2711 2712 objcg = __folio_objcg(folio); 2713 obj_cgroup_uncharge_pages(objcg, nr_pages); 2714 folio->memcg_data = 0; 2715 obj_cgroup_put(objcg); 2716 } 2717 2718 /* Replace the stock objcg with objcg, return the old objcg */ 2719 static struct obj_cgroup *replace_stock_objcg(struct memcg_stock_pcp *stock, 2720 struct obj_cgroup *objcg) 2721 { 2722 struct obj_cgroup *old = NULL; 2723 2724 old = drain_obj_stock(stock); 2725 obj_cgroup_get(objcg); 2726 stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes) 2727 ? 
atomic_xchg(&objcg->nr_charged_bytes, 0) : 0; 2728 WRITE_ONCE(stock->cached_objcg, objcg); 2729 return old; 2730 } 2731 2732 static void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat, 2733 enum node_stat_item idx, int nr) 2734 { 2735 struct memcg_stock_pcp *stock; 2736 struct obj_cgroup *old = NULL; 2737 unsigned long flags; 2738 int *bytes; 2739 2740 local_lock_irqsave(&memcg_stock.stock_lock, flags); 2741 stock = this_cpu_ptr(&memcg_stock); 2742 2743 /* 2744 * Save vmstat data in stock and skip vmstat array update unless 2745 * accumulating over a page of vmstat data or when pgdat or idx 2746 * changes. 2747 */ 2748 if (READ_ONCE(stock->cached_objcg) != objcg) { 2749 old = replace_stock_objcg(stock, objcg); 2750 stock->cached_pgdat = pgdat; 2751 } else if (stock->cached_pgdat != pgdat) { 2752 /* Flush the existing cached vmstat data */ 2753 struct pglist_data *oldpg = stock->cached_pgdat; 2754 2755 if (stock->nr_slab_reclaimable_b) { 2756 __mod_objcg_mlstate(objcg, oldpg, NR_SLAB_RECLAIMABLE_B, 2757 stock->nr_slab_reclaimable_b); 2758 stock->nr_slab_reclaimable_b = 0; 2759 } 2760 if (stock->nr_slab_unreclaimable_b) { 2761 __mod_objcg_mlstate(objcg, oldpg, NR_SLAB_UNRECLAIMABLE_B, 2762 stock->nr_slab_unreclaimable_b); 2763 stock->nr_slab_unreclaimable_b = 0; 2764 } 2765 stock->cached_pgdat = pgdat; 2766 } 2767 2768 bytes = (idx == NR_SLAB_RECLAIMABLE_B) ? &stock->nr_slab_reclaimable_b 2769 : &stock->nr_slab_unreclaimable_b; 2770 /* 2771 * Even for large object >= PAGE_SIZE, the vmstat data will still be 2772 * cached locally at least once before pushing it out. 2773 */ 2774 if (!*bytes) { 2775 *bytes = nr; 2776 nr = 0; 2777 } else { 2778 *bytes += nr; 2779 if (abs(*bytes) > PAGE_SIZE) { 2780 nr = *bytes; 2781 *bytes = 0; 2782 } else { 2783 nr = 0; 2784 } 2785 } 2786 if (nr) 2787 __mod_objcg_mlstate(objcg, pgdat, idx, nr); 2788 2789 local_unlock_irqrestore(&memcg_stock.stock_lock, flags); 2790 obj_cgroup_put(old); 2791 } 2792 2793 static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes) 2794 { 2795 struct memcg_stock_pcp *stock; 2796 unsigned long flags; 2797 bool ret = false; 2798 2799 local_lock_irqsave(&memcg_stock.stock_lock, flags); 2800 2801 stock = this_cpu_ptr(&memcg_stock); 2802 if (objcg == READ_ONCE(stock->cached_objcg) && stock->nr_bytes >= nr_bytes) { 2803 stock->nr_bytes -= nr_bytes; 2804 ret = true; 2805 } 2806 2807 local_unlock_irqrestore(&memcg_stock.stock_lock, flags); 2808 2809 return ret; 2810 } 2811 2812 static struct obj_cgroup *drain_obj_stock(struct memcg_stock_pcp *stock) 2813 { 2814 struct obj_cgroup *old = READ_ONCE(stock->cached_objcg); 2815 2816 if (!old) 2817 return NULL; 2818 2819 if (stock->nr_bytes) { 2820 unsigned int nr_pages = stock->nr_bytes >> PAGE_SHIFT; 2821 unsigned int nr_bytes = stock->nr_bytes & (PAGE_SIZE - 1); 2822 2823 if (nr_pages) { 2824 struct mem_cgroup *memcg; 2825 2826 memcg = get_mem_cgroup_from_objcg(old); 2827 2828 mod_memcg_state(memcg, MEMCG_KMEM, -nr_pages); 2829 memcg1_account_kmem(memcg, -nr_pages); 2830 __refill_stock(memcg, nr_pages); 2831 2832 css_put(&memcg->css); 2833 } 2834 2835 /* 2836 * The leftover is flushed to the centralized per-memcg value. 2837 * On the next attempt to refill obj stock it will be moved 2838 * to a per-cpu stock (probably, on an other CPU), see 2839 * refill_obj_stock(). 2840 * 2841 * How often it's flushed is a trade-off between the memory 2842 * limit enforcement accuracy and potential CPU contention, 2843 * so it might be changed in the future. 
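 * (Only the sub-page remainder, i.e. fewer than PAGE_SIZE bytes, is
 * moved to the atomic counter per flush, so the central value stays
 * small.)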
2844 */ 2845 atomic_add(nr_bytes, &old->nr_charged_bytes); 2846 stock->nr_bytes = 0; 2847 } 2848 2849 /* 2850 * Flush the vmstat data in current stock 2851 */ 2852 if (stock->nr_slab_reclaimable_b || stock->nr_slab_unreclaimable_b) { 2853 if (stock->nr_slab_reclaimable_b) { 2854 __mod_objcg_mlstate(old, stock->cached_pgdat, 2855 NR_SLAB_RECLAIMABLE_B, 2856 stock->nr_slab_reclaimable_b); 2857 stock->nr_slab_reclaimable_b = 0; 2858 } 2859 if (stock->nr_slab_unreclaimable_b) { 2860 __mod_objcg_mlstate(old, stock->cached_pgdat, 2861 NR_SLAB_UNRECLAIMABLE_B, 2862 stock->nr_slab_unreclaimable_b); 2863 stock->nr_slab_unreclaimable_b = 0; 2864 } 2865 stock->cached_pgdat = NULL; 2866 } 2867 2868 WRITE_ONCE(stock->cached_objcg, NULL); 2869 /* 2870 * The `old' objects needs to be released by the caller via 2871 * obj_cgroup_put() outside of memcg_stock_pcp::stock_lock. 2872 */ 2873 return old; 2874 } 2875 2876 static bool obj_stock_flush_required(struct memcg_stock_pcp *stock, 2877 struct mem_cgroup *root_memcg) 2878 { 2879 struct obj_cgroup *objcg = READ_ONCE(stock->cached_objcg); 2880 struct mem_cgroup *memcg; 2881 2882 if (objcg) { 2883 memcg = obj_cgroup_memcg(objcg); 2884 if (memcg && mem_cgroup_is_descendant(memcg, root_memcg)) 2885 return true; 2886 } 2887 2888 return false; 2889 } 2890 2891 static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes, 2892 bool allow_uncharge) 2893 { 2894 struct memcg_stock_pcp *stock; 2895 struct obj_cgroup *old = NULL; 2896 unsigned long flags; 2897 unsigned int nr_pages = 0; 2898 2899 local_lock_irqsave(&memcg_stock.stock_lock, flags); 2900 2901 stock = this_cpu_ptr(&memcg_stock); 2902 if (READ_ONCE(stock->cached_objcg) != objcg) { /* reset if necessary */ 2903 old = replace_stock_objcg(stock, objcg); 2904 allow_uncharge = true; /* Allow uncharge when objcg changes */ 2905 } 2906 stock->nr_bytes += nr_bytes; 2907 2908 if (allow_uncharge && (stock->nr_bytes > PAGE_SIZE)) { 2909 nr_pages = stock->nr_bytes >> PAGE_SHIFT; 2910 stock->nr_bytes &= (PAGE_SIZE - 1); 2911 } 2912 2913 local_unlock_irqrestore(&memcg_stock.stock_lock, flags); 2914 obj_cgroup_put(old); 2915 2916 if (nr_pages) 2917 obj_cgroup_uncharge_pages(objcg, nr_pages); 2918 } 2919 2920 int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size) 2921 { 2922 unsigned int nr_pages, nr_bytes; 2923 int ret; 2924 2925 if (consume_obj_stock(objcg, size)) 2926 return 0; 2927 2928 /* 2929 * In theory, objcg->nr_charged_bytes can have enough 2930 * pre-charged bytes to satisfy the allocation. However, 2931 * flushing objcg->nr_charged_bytes requires two atomic 2932 * operations, and objcg->nr_charged_bytes can't be big. 2933 * The shared objcg->nr_charged_bytes can also become a 2934 * performance bottleneck if all tasks of the same memcg are 2935 * trying to update it. So it's better to ignore it and try 2936 * grab some new pages. The stock's nr_bytes will be flushed to 2937 * objcg->nr_charged_bytes later on when objcg changes. 2938 * 2939 * The stock's nr_bytes may contain enough pre-charged bytes 2940 * to allow one less page from being charged, but we can't rely 2941 * on the pre-charged bytes not being changed outside of 2942 * consume_obj_stock() or refill_obj_stock(). So ignore those 2943 * pre-charged bytes as well when charging pages. To avoid a 2944 * page uncharge right after a page charge, we set the 2945 * allow_uncharge flag to false when calling refill_obj_stock() 2946 * to temporarily allow the pre-charged bytes to exceed the page 2947 * size limit. 
The maximum reachable value of the pre-charged 2948 * bytes is (sizeof(object) + PAGE_SIZE - 2) if there is no data 2949 * race. 2950 */ 2951 nr_pages = size >> PAGE_SHIFT; 2952 nr_bytes = size & (PAGE_SIZE - 1); 2953 2954 if (nr_bytes) 2955 nr_pages += 1; 2956 2957 ret = obj_cgroup_charge_pages(objcg, gfp, nr_pages); 2958 if (!ret && nr_bytes) 2959 refill_obj_stock(objcg, PAGE_SIZE - nr_bytes, false); 2960 2961 return ret; 2962 } 2963 2964 void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size) 2965 { 2966 refill_obj_stock(objcg, size, true); 2967 } 2968 2969 static inline size_t obj_full_size(struct kmem_cache *s) 2970 { 2971 /* 2972 * For each accounted object there is an extra space which is used 2973 * to store obj_cgroup membership. Charge it too. 2974 */ 2975 return s->size + sizeof(struct obj_cgroup *); 2976 } 2977 2978 bool __memcg_slab_post_alloc_hook(struct kmem_cache *s, struct list_lru *lru, 2979 gfp_t flags, size_t size, void **p) 2980 { 2981 struct obj_cgroup *objcg; 2982 struct slab *slab; 2983 unsigned long off; 2984 size_t i; 2985 2986 /* 2987 * The obtained objcg pointer is safe to use within the current scope, 2988 * defined by current task or set_active_memcg() pair. 2989 * obj_cgroup_get() is used to get a permanent reference. 2990 */ 2991 objcg = current_obj_cgroup(); 2992 if (!objcg) 2993 return true; 2994 2995 /* 2996 * slab_alloc_node() avoids the NULL check, so we might be called with a 2997 * single NULL object. kmem_cache_alloc_bulk() aborts if it can't fill 2998 * the whole requested size. 2999 * return success as there's nothing to free back 3000 */ 3001 if (unlikely(*p == NULL)) 3002 return true; 3003 3004 flags &= gfp_allowed_mask; 3005 3006 if (lru) { 3007 int ret; 3008 struct mem_cgroup *memcg; 3009 3010 memcg = get_mem_cgroup_from_objcg(objcg); 3011 ret = memcg_list_lru_alloc(memcg, lru, flags); 3012 css_put(&memcg->css); 3013 3014 if (ret) 3015 return false; 3016 } 3017 3018 if (obj_cgroup_charge(objcg, flags, size * obj_full_size(s))) 3019 return false; 3020 3021 for (i = 0; i < size; i++) { 3022 slab = virt_to_slab(p[i]); 3023 3024 if (!slab_obj_exts(slab) && 3025 alloc_slab_obj_exts(slab, s, flags, false)) { 3026 obj_cgroup_uncharge(objcg, obj_full_size(s)); 3027 continue; 3028 } 3029 3030 off = obj_to_index(s, slab, p[i]); 3031 obj_cgroup_get(objcg); 3032 slab_obj_exts(slab)[off].objcg = objcg; 3033 mod_objcg_state(objcg, slab_pgdat(slab), 3034 cache_vmstat_idx(s), obj_full_size(s)); 3035 } 3036 3037 return true; 3038 } 3039 3040 void __memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab, 3041 void **p, int objects, struct slabobj_ext *obj_exts) 3042 { 3043 for (int i = 0; i < objects; i++) { 3044 struct obj_cgroup *objcg; 3045 unsigned int off; 3046 3047 off = obj_to_index(s, slab, p[i]); 3048 objcg = obj_exts[off].objcg; 3049 if (!objcg) 3050 continue; 3051 3052 obj_exts[off].objcg = NULL; 3053 obj_cgroup_uncharge(objcg, obj_full_size(s)); 3054 mod_objcg_state(objcg, slab_pgdat(slab), cache_vmstat_idx(s), 3055 -obj_full_size(s)); 3056 obj_cgroup_put(objcg); 3057 } 3058 } 3059 3060 /* 3061 * Because folio_memcg(head) is not set on tails, set it now. 
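 * For example, splitting an order-9 folio down to order 0 copies
 * memcg_data into the 511 tail pages and takes 511 extra objcg/memcg
 * references, so that every resulting page can later be uncharged on
 * its own.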
3062 */ 3063 void split_page_memcg(struct page *head, int old_order, int new_order) 3064 { 3065 struct folio *folio = page_folio(head); 3066 int i; 3067 unsigned int old_nr = 1 << old_order; 3068 unsigned int new_nr = 1 << new_order; 3069 3070 if (mem_cgroup_disabled() || !folio_memcg_charged(folio)) 3071 return; 3072 3073 for (i = new_nr; i < old_nr; i += new_nr) 3074 folio_page(folio, i)->memcg_data = folio->memcg_data; 3075 3076 if (folio_memcg_kmem(folio)) 3077 obj_cgroup_get_many(__folio_objcg(folio), old_nr / new_nr - 1); 3078 else 3079 css_get_many(&folio_memcg(folio)->css, old_nr / new_nr - 1); 3080 } 3081 3082 unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap) 3083 { 3084 unsigned long val; 3085 3086 if (mem_cgroup_is_root(memcg)) { 3087 /* 3088 * Approximate root's usage from global state. This isn't 3089 * perfect, but the root usage was always an approximation. 3090 */ 3091 val = global_node_page_state(NR_FILE_PAGES) + 3092 global_node_page_state(NR_ANON_MAPPED); 3093 if (swap) 3094 val += total_swap_pages - get_nr_swap_pages(); 3095 } else { 3096 if (!swap) 3097 val = page_counter_read(&memcg->memory); 3098 else 3099 val = page_counter_read(&memcg->memsw); 3100 } 3101 return val; 3102 } 3103 3104 static int memcg_online_kmem(struct mem_cgroup *memcg) 3105 { 3106 struct obj_cgroup *objcg; 3107 3108 if (mem_cgroup_kmem_disabled()) 3109 return 0; 3110 3111 if (unlikely(mem_cgroup_is_root(memcg))) 3112 return 0; 3113 3114 objcg = obj_cgroup_alloc(); 3115 if (!objcg) 3116 return -ENOMEM; 3117 3118 objcg->memcg = memcg; 3119 rcu_assign_pointer(memcg->objcg, objcg); 3120 obj_cgroup_get(objcg); 3121 memcg->orig_objcg = objcg; 3122 3123 static_branch_enable(&memcg_kmem_online_key); 3124 3125 memcg->kmemcg_id = memcg->id.id; 3126 3127 return 0; 3128 } 3129 3130 static void memcg_offline_kmem(struct mem_cgroup *memcg) 3131 { 3132 struct mem_cgroup *parent; 3133 3134 if (mem_cgroup_kmem_disabled()) 3135 return; 3136 3137 if (unlikely(mem_cgroup_is_root(memcg))) 3138 return; 3139 3140 parent = parent_mem_cgroup(memcg); 3141 if (!parent) 3142 parent = root_mem_cgroup; 3143 3144 memcg_reparent_list_lrus(memcg, parent); 3145 3146 /* 3147 * Objcg's reparenting must be after list_lru's, make sure list_lru 3148 * helpers won't use parent's list_lru until child is drained. 
3149 */ 3150 memcg_reparent_objcgs(memcg, parent); 3151 } 3152 3153 #ifdef CONFIG_CGROUP_WRITEBACK 3154 3155 #include <trace/events/writeback.h> 3156 3157 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp) 3158 { 3159 return wb_domain_init(&memcg->cgwb_domain, gfp); 3160 } 3161 3162 static void memcg_wb_domain_exit(struct mem_cgroup *memcg) 3163 { 3164 wb_domain_exit(&memcg->cgwb_domain); 3165 } 3166 3167 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg) 3168 { 3169 wb_domain_size_changed(&memcg->cgwb_domain); 3170 } 3171 3172 struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb) 3173 { 3174 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css); 3175 3176 if (!memcg->css.parent) 3177 return NULL; 3178 3179 return &memcg->cgwb_domain; 3180 } 3181 3182 /** 3183 * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg 3184 * @wb: bdi_writeback in question 3185 * @pfilepages: out parameter for number of file pages 3186 * @pheadroom: out parameter for number of allocatable pages according to memcg 3187 * @pdirty: out parameter for number of dirty pages 3188 * @pwriteback: out parameter for number of pages under writeback 3189 * 3190 * Determine the numbers of file, headroom, dirty, and writeback pages in 3191 * @wb's memcg. File, dirty and writeback are self-explanatory. Headroom 3192 * is a bit more involved. 3193 * 3194 * A memcg's headroom is "min(max, high) - used". In the hierarchy, the 3195 * headroom is calculated as the lowest headroom of itself and the 3196 * ancestors. Note that this doesn't consider the actual amount of 3197 * available memory in the system. The caller should further cap 3198 * *@pheadroom accordingly. 3199 */ 3200 void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages, 3201 unsigned long *pheadroom, unsigned long *pdirty, 3202 unsigned long *pwriteback) 3203 { 3204 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css); 3205 struct mem_cgroup *parent; 3206 3207 mem_cgroup_flush_stats_ratelimited(memcg); 3208 3209 *pdirty = memcg_page_state(memcg, NR_FILE_DIRTY); 3210 *pwriteback = memcg_page_state(memcg, NR_WRITEBACK); 3211 *pfilepages = memcg_page_state(memcg, NR_INACTIVE_FILE) + 3212 memcg_page_state(memcg, NR_ACTIVE_FILE); 3213 3214 *pheadroom = PAGE_COUNTER_MAX; 3215 while ((parent = parent_mem_cgroup(memcg))) { 3216 unsigned long ceiling = min(READ_ONCE(memcg->memory.max), 3217 READ_ONCE(memcg->memory.high)); 3218 unsigned long used = page_counter_read(&memcg->memory); 3219 3220 *pheadroom = min(*pheadroom, ceiling - min(ceiling, used)); 3221 memcg = parent; 3222 } 3223 } 3224 3225 /* 3226 * Foreign dirty flushing 3227 * 3228 * There's an inherent mismatch between memcg and writeback. The former 3229 * tracks ownership per-page while the latter per-inode. This was a 3230 * deliberate design decision because honoring per-page ownership in the 3231 * writeback path is complicated, may lead to higher CPU and IO overheads 3232 * and deemed unnecessary given that write-sharing an inode across 3233 * different cgroups isn't a common use-case. 3234 * 3235 * Combined with inode majority-writer ownership switching, this works well 3236 * enough in most cases but there are some pathological cases. For 3237 * example, let's say there are two cgroups A and B which keep writing to 3238 * different but confined parts of the same inode. B owns the inode and 3239 * A's memory is limited far below B's. 
A's dirty ratio can rise enough to 3240 * trigger balance_dirty_pages() sleeps but B's can be low enough to avoid 3241 * triggering background writeback. A will be slowed down without a way to 3242 * make writeback of the dirty pages happen. 3243 * 3244 * Conditions like the above can lead to a cgroup getting repeatedly and 3245 * severely throttled after making some progress after each 3246 * dirty_expire_interval while the underlying IO device is almost 3247 * completely idle. 3248 * 3249 * Solving this problem completely requires matching the ownership tracking 3250 * granularities between memcg and writeback in either direction. However, 3251 * the more egregious behaviors can be avoided by simply remembering the 3252 * most recent foreign dirtying events and initiating remote flushes on 3253 * them when local writeback isn't enough to keep the memory clean enough. 3254 * 3255 * The following two functions implement such mechanism. When a foreign 3256 * page - a page whose memcg and writeback ownerships don't match - is 3257 * dirtied, mem_cgroup_track_foreign_dirty() records the inode owning 3258 * bdi_writeback on the page owning memcg. When balance_dirty_pages() 3259 * decides that the memcg needs to sleep due to high dirty ratio, it calls 3260 * mem_cgroup_flush_foreign() which queues writeback on the recorded 3261 * foreign bdi_writebacks which haven't expired. Both the numbers of 3262 * recorded bdi_writebacks and concurrent in-flight foreign writebacks are 3263 * limited to MEMCG_CGWB_FRN_CNT. 3264 * 3265 * The mechanism only remembers IDs and doesn't hold any object references. 3266 * As being wrong occasionally doesn't matter, updates and accesses to the 3267 * records are lockless and racy. 3268 */ 3269 void mem_cgroup_track_foreign_dirty_slowpath(struct folio *folio, 3270 struct bdi_writeback *wb) 3271 { 3272 struct mem_cgroup *memcg = folio_memcg(folio); 3273 struct memcg_cgwb_frn *frn; 3274 u64 now = get_jiffies_64(); 3275 u64 oldest_at = now; 3276 int oldest = -1; 3277 int i; 3278 3279 trace_track_foreign_dirty(folio, wb); 3280 3281 /* 3282 * Pick the slot to use. If there is already a slot for @wb, keep 3283 * using it. If not replace the oldest one which isn't being 3284 * written out. 3285 */ 3286 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) { 3287 frn = &memcg->cgwb_frn[i]; 3288 if (frn->bdi_id == wb->bdi->id && 3289 frn->memcg_id == wb->memcg_css->id) 3290 break; 3291 if (time_before64(frn->at, oldest_at) && 3292 atomic_read(&frn->done.cnt) == 1) { 3293 oldest = i; 3294 oldest_at = frn->at; 3295 } 3296 } 3297 3298 if (i < MEMCG_CGWB_FRN_CNT) { 3299 /* 3300 * Re-using an existing one. Update timestamp lazily to 3301 * avoid making the cacheline hot. We want them to be 3302 * reasonably up-to-date and significantly shorter than 3303 * dirty_expire_interval as that's what expires the record. 3304 * Use the shorter of 1s and dirty_expire_interval / 8. 
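 * (With the default 30s dirty_expire_interval this works out to
 * min(HZ, ~3.75s), i.e. one second.)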
3305 */ 3306 unsigned long update_intv = 3307 min_t(unsigned long, HZ, 3308 msecs_to_jiffies(dirty_expire_interval * 10) / 8); 3309 3310 if (time_before64(frn->at, now - update_intv)) 3311 frn->at = now; 3312 } else if (oldest >= 0) { 3313 /* replace the oldest free one */ 3314 frn = &memcg->cgwb_frn[oldest]; 3315 frn->bdi_id = wb->bdi->id; 3316 frn->memcg_id = wb->memcg_css->id; 3317 frn->at = now; 3318 } 3319 } 3320 3321 /* issue foreign writeback flushes for recorded foreign dirtying events */ 3322 void mem_cgroup_flush_foreign(struct bdi_writeback *wb) 3323 { 3324 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css); 3325 unsigned long intv = msecs_to_jiffies(dirty_expire_interval * 10); 3326 u64 now = jiffies_64; 3327 int i; 3328 3329 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) { 3330 struct memcg_cgwb_frn *frn = &memcg->cgwb_frn[i]; 3331 3332 /* 3333 * If the record is older than dirty_expire_interval, 3334 * writeback on it has already started. No need to kick it 3335 * off again. Also, don't start a new one if there's 3336 * already one in flight. 3337 */ 3338 if (time_after64(frn->at, now - intv) && 3339 atomic_read(&frn->done.cnt) == 1) { 3340 frn->at = 0; 3341 trace_flush_foreign(wb, frn->bdi_id, frn->memcg_id); 3342 cgroup_writeback_by_id(frn->bdi_id, frn->memcg_id, 3343 WB_REASON_FOREIGN_FLUSH, 3344 &frn->done); 3345 } 3346 } 3347 } 3348 3349 #else /* CONFIG_CGROUP_WRITEBACK */ 3350 3351 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp) 3352 { 3353 return 0; 3354 } 3355 3356 static void memcg_wb_domain_exit(struct mem_cgroup *memcg) 3357 { 3358 } 3359 3360 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg) 3361 { 3362 } 3363 3364 #endif /* CONFIG_CGROUP_WRITEBACK */ 3365 3366 /* 3367 * Private memory cgroup IDR 3368 * 3369 * Swap-out records and page cache shadow entries need to store memcg 3370 * references in constrained space, so we maintain an ID space that is 3371 * limited to 16 bit (MEM_CGROUP_ID_MAX), limiting the total number of 3372 * memory-controlled cgroups to 64k. 3373 * 3374 * However, there usually are many references to the offline CSS after 3375 * the cgroup has been destroyed, such as page cache or reclaimable 3376 * slab objects, that don't need to hang on to the ID. We want to keep 3377 * those dead CSS from occupying IDs, or we might quickly exhaust the 3378 * relatively small ID space and prevent the creation of new cgroups 3379 * even when there are much fewer than 64k cgroups - possibly none. 3380 * 3381 * Maintain a private 16-bit ID space for memcg, and allow the ID to 3382 * be freed and recycled when it's no longer needed, which is usually 3383 * when the CSS is offlined. 3384 * 3385 * The only exception to that are records of swapped out tmpfs/shmem 3386 * pages that need to be attributed to live ancestors on swapin. But 3387 * those references are manageable from userspace. 
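 *
 * IDs are handed out by the mem_cgroup_ids xarray below: they are
 * allocated in the range [1, MEM_CGROUP_ID_MAX] when the css is
 * allocated and only published for lookup once the css is fully
 * online.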
3388 */ 3389 3390 #define MEM_CGROUP_ID_MAX ((1UL << MEM_CGROUP_ID_SHIFT) - 1) 3391 static DEFINE_XARRAY_ALLOC1(mem_cgroup_ids); 3392 3393 static void mem_cgroup_id_remove(struct mem_cgroup *memcg) 3394 { 3395 if (memcg->id.id > 0) { 3396 xa_erase(&mem_cgroup_ids, memcg->id.id); 3397 memcg->id.id = 0; 3398 } 3399 } 3400 3401 void __maybe_unused mem_cgroup_id_get_many(struct mem_cgroup *memcg, 3402 unsigned int n) 3403 { 3404 refcount_add(n, &memcg->id.ref); 3405 } 3406 3407 static void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n) 3408 { 3409 if (refcount_sub_and_test(n, &memcg->id.ref)) { 3410 mem_cgroup_id_remove(memcg); 3411 3412 /* Memcg ID pins CSS */ 3413 css_put(&memcg->css); 3414 } 3415 } 3416 3417 static inline void mem_cgroup_id_put(struct mem_cgroup *memcg) 3418 { 3419 mem_cgroup_id_put_many(memcg, 1); 3420 } 3421 3422 struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg) 3423 { 3424 while (!refcount_inc_not_zero(&memcg->id.ref)) { 3425 /* 3426 * The root cgroup cannot be destroyed, so it's refcount must 3427 * always be >= 1. 3428 */ 3429 if (WARN_ON_ONCE(mem_cgroup_is_root(memcg))) { 3430 VM_BUG_ON(1); 3431 break; 3432 } 3433 memcg = parent_mem_cgroup(memcg); 3434 if (!memcg) 3435 memcg = root_mem_cgroup; 3436 } 3437 return memcg; 3438 } 3439 3440 /** 3441 * mem_cgroup_from_id - look up a memcg from a memcg id 3442 * @id: the memcg id to look up 3443 * 3444 * Caller must hold rcu_read_lock(). 3445 */ 3446 struct mem_cgroup *mem_cgroup_from_id(unsigned short id) 3447 { 3448 WARN_ON_ONCE(!rcu_read_lock_held()); 3449 return xa_load(&mem_cgroup_ids, id); 3450 } 3451 3452 #ifdef CONFIG_SHRINKER_DEBUG 3453 struct mem_cgroup *mem_cgroup_get_from_ino(unsigned long ino) 3454 { 3455 struct cgroup *cgrp; 3456 struct cgroup_subsys_state *css; 3457 struct mem_cgroup *memcg; 3458 3459 cgrp = cgroup_get_from_id(ino); 3460 if (IS_ERR(cgrp)) 3461 return ERR_CAST(cgrp); 3462 3463 css = cgroup_get_e_css(cgrp, &memory_cgrp_subsys); 3464 if (css) 3465 memcg = container_of(css, struct mem_cgroup, css); 3466 else 3467 memcg = ERR_PTR(-ENOENT); 3468 3469 cgroup_put(cgrp); 3470 3471 return memcg; 3472 } 3473 #endif 3474 3475 static void free_mem_cgroup_per_node_info(struct mem_cgroup_per_node *pn) 3476 { 3477 if (!pn) 3478 return; 3479 3480 free_percpu(pn->lruvec_stats_percpu); 3481 kfree(pn->lruvec_stats); 3482 kfree(pn); 3483 } 3484 3485 static bool alloc_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node) 3486 { 3487 struct mem_cgroup_per_node *pn; 3488 3489 pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, node); 3490 if (!pn) 3491 return false; 3492 3493 pn->lruvec_stats = kzalloc_node(sizeof(struct lruvec_stats), 3494 GFP_KERNEL_ACCOUNT, node); 3495 if (!pn->lruvec_stats) 3496 goto fail; 3497 3498 pn->lruvec_stats_percpu = alloc_percpu_gfp(struct lruvec_stats_percpu, 3499 GFP_KERNEL_ACCOUNT); 3500 if (!pn->lruvec_stats_percpu) 3501 goto fail; 3502 3503 lruvec_init(&pn->lruvec); 3504 pn->memcg = memcg; 3505 3506 memcg->nodeinfo[node] = pn; 3507 return true; 3508 fail: 3509 free_mem_cgroup_per_node_info(pn); 3510 return false; 3511 } 3512 3513 static void __mem_cgroup_free(struct mem_cgroup *memcg) 3514 { 3515 int node; 3516 3517 obj_cgroup_put(memcg->orig_objcg); 3518 3519 for_each_node(node) 3520 free_mem_cgroup_per_node_info(memcg->nodeinfo[node]); 3521 memcg1_free_events(memcg); 3522 kfree(memcg->vmstats); 3523 free_percpu(memcg->vmstats_percpu); 3524 kfree(memcg); 3525 } 3526 3527 static void mem_cgroup_free(struct mem_cgroup *memcg) 3528 { 3529 
lru_gen_exit_memcg(memcg); 3530 memcg_wb_domain_exit(memcg); 3531 __mem_cgroup_free(memcg); 3532 } 3533 3534 static struct mem_cgroup *mem_cgroup_alloc(struct mem_cgroup *parent) 3535 { 3536 struct memcg_vmstats_percpu *statc, *pstatc; 3537 struct mem_cgroup *memcg; 3538 int node, cpu; 3539 int __maybe_unused i; 3540 long error; 3541 3542 memcg = kzalloc(struct_size(memcg, nodeinfo, nr_node_ids), GFP_KERNEL); 3543 if (!memcg) 3544 return ERR_PTR(-ENOMEM); 3545 3546 error = xa_alloc(&mem_cgroup_ids, &memcg->id.id, NULL, 3547 XA_LIMIT(1, MEM_CGROUP_ID_MAX), GFP_KERNEL); 3548 if (error) 3549 goto fail; 3550 error = -ENOMEM; 3551 3552 memcg->vmstats = kzalloc(sizeof(struct memcg_vmstats), 3553 GFP_KERNEL_ACCOUNT); 3554 if (!memcg->vmstats) 3555 goto fail; 3556 3557 memcg->vmstats_percpu = alloc_percpu_gfp(struct memcg_vmstats_percpu, 3558 GFP_KERNEL_ACCOUNT); 3559 if (!memcg->vmstats_percpu) 3560 goto fail; 3561 3562 if (!memcg1_alloc_events(memcg)) 3563 goto fail; 3564 3565 for_each_possible_cpu(cpu) { 3566 if (parent) 3567 pstatc = per_cpu_ptr(parent->vmstats_percpu, cpu); 3568 statc = per_cpu_ptr(memcg->vmstats_percpu, cpu); 3569 statc->parent = parent ? pstatc : NULL; 3570 statc->vmstats = memcg->vmstats; 3571 } 3572 3573 for_each_node(node) 3574 if (!alloc_mem_cgroup_per_node_info(memcg, node)) 3575 goto fail; 3576 3577 if (memcg_wb_domain_init(memcg, GFP_KERNEL)) 3578 goto fail; 3579 3580 INIT_WORK(&memcg->high_work, high_work_func); 3581 vmpressure_init(&memcg->vmpressure); 3582 INIT_LIST_HEAD(&memcg->memory_peaks); 3583 INIT_LIST_HEAD(&memcg->swap_peaks); 3584 spin_lock_init(&memcg->peaks_lock); 3585 memcg->socket_pressure = jiffies; 3586 memcg1_memcg_init(memcg); 3587 memcg->kmemcg_id = -1; 3588 INIT_LIST_HEAD(&memcg->objcg_list); 3589 #ifdef CONFIG_CGROUP_WRITEBACK 3590 INIT_LIST_HEAD(&memcg->cgwb_list); 3591 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) 3592 memcg->cgwb_frn[i].done = 3593 __WB_COMPLETION_INIT(&memcg_cgwb_frn_waitq); 3594 #endif 3595 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 3596 spin_lock_init(&memcg->deferred_split_queue.split_queue_lock); 3597 INIT_LIST_HEAD(&memcg->deferred_split_queue.split_queue); 3598 memcg->deferred_split_queue.split_queue_len = 0; 3599 #endif 3600 lru_gen_init_memcg(memcg); 3601 return memcg; 3602 fail: 3603 mem_cgroup_id_remove(memcg); 3604 __mem_cgroup_free(memcg); 3605 return ERR_PTR(error); 3606 } 3607 3608 static struct cgroup_subsys_state * __ref 3609 mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css) 3610 { 3611 struct mem_cgroup *parent = mem_cgroup_from_css(parent_css); 3612 struct mem_cgroup *memcg, *old_memcg; 3613 3614 old_memcg = set_active_memcg(parent); 3615 memcg = mem_cgroup_alloc(parent); 3616 set_active_memcg(old_memcg); 3617 if (IS_ERR(memcg)) 3618 return ERR_CAST(memcg); 3619 3620 page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX); 3621 memcg1_soft_limit_reset(memcg); 3622 #ifdef CONFIG_ZSWAP 3623 memcg->zswap_max = PAGE_COUNTER_MAX; 3624 WRITE_ONCE(memcg->zswap_writeback, true); 3625 #endif 3626 page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX); 3627 if (parent) { 3628 WRITE_ONCE(memcg->swappiness, mem_cgroup_swappiness(parent)); 3629 3630 page_counter_init(&memcg->memory, &parent->memory, true); 3631 page_counter_init(&memcg->swap, &parent->swap, false); 3632 #ifdef CONFIG_MEMCG_V1 3633 WRITE_ONCE(memcg->oom_kill_disable, READ_ONCE(parent->oom_kill_disable)); 3634 page_counter_init(&memcg->kmem, &parent->kmem, false); 3635 page_counter_init(&memcg->tcpmem, &parent->tcpmem, false); 3636 #endif 3637 } else { 3638 
init_memcg_stats(); 3639 init_memcg_events(); 3640 page_counter_init(&memcg->memory, NULL, true); 3641 page_counter_init(&memcg->swap, NULL, false); 3642 #ifdef CONFIG_MEMCG_V1 3643 page_counter_init(&memcg->kmem, NULL, false); 3644 page_counter_init(&memcg->tcpmem, NULL, false); 3645 #endif 3646 root_mem_cgroup = memcg; 3647 return &memcg->css; 3648 } 3649 3650 if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket) 3651 static_branch_inc(&memcg_sockets_enabled_key); 3652 3653 if (!cgroup_memory_nobpf) 3654 static_branch_inc(&memcg_bpf_enabled_key); 3655 3656 return &memcg->css; 3657 } 3658 3659 static int mem_cgroup_css_online(struct cgroup_subsys_state *css) 3660 { 3661 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 3662 3663 if (memcg_online_kmem(memcg)) 3664 goto remove_id; 3665 3666 /* 3667 * A memcg must be visible for expand_shrinker_info() 3668 * by the time the maps are allocated. So, we allocate maps 3669 * here, when for_each_mem_cgroup() can't skip it. 3670 */ 3671 if (alloc_shrinker_info(memcg)) 3672 goto offline_kmem; 3673 3674 if (unlikely(mem_cgroup_is_root(memcg)) && !mem_cgroup_disabled()) 3675 queue_delayed_work(system_unbound_wq, &stats_flush_dwork, 3676 FLUSH_TIME); 3677 lru_gen_online_memcg(memcg); 3678 3679 /* Online state pins memcg ID, memcg ID pins CSS */ 3680 refcount_set(&memcg->id.ref, 1); 3681 css_get(css); 3682 3683 /* 3684 * Ensure mem_cgroup_from_id() works once we're fully online. 3685 * 3686 * We could do this earlier and require callers to filter with 3687 * css_tryget_online(). But right now there are no users that 3688 * need earlier access, and the workingset code relies on the 3689 * cgroup tree linkage (mem_cgroup_get_nr_swap_pages()). So 3690 * publish it here at the end of onlining. This matches the 3691 * regular ID destruction during offlining. 
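 *
 * A typical lookup then looks like (illustrative sketch only):
 *
 *	rcu_read_lock();
 *	memcg = mem_cgroup_from_id(id);
 *	if (memcg && css_tryget(&memcg->css))
 *		... use memcg, then css_put(&memcg->css) ...
 *	rcu_read_unlock();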
3692 */ 3693 xa_store(&mem_cgroup_ids, memcg->id.id, memcg, GFP_KERNEL); 3694 3695 return 0; 3696 offline_kmem: 3697 memcg_offline_kmem(memcg); 3698 remove_id: 3699 mem_cgroup_id_remove(memcg); 3700 return -ENOMEM; 3701 } 3702 3703 static void mem_cgroup_css_offline(struct cgroup_subsys_state *css) 3704 { 3705 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 3706 3707 memcg1_css_offline(memcg); 3708 3709 page_counter_set_min(&memcg->memory, 0); 3710 page_counter_set_low(&memcg->memory, 0); 3711 3712 zswap_memcg_offline_cleanup(memcg); 3713 3714 memcg_offline_kmem(memcg); 3715 reparent_shrinker_deferred(memcg); 3716 wb_memcg_offline(memcg); 3717 lru_gen_offline_memcg(memcg); 3718 3719 drain_all_stock(memcg); 3720 3721 mem_cgroup_id_put(memcg); 3722 } 3723 3724 static void mem_cgroup_css_released(struct cgroup_subsys_state *css) 3725 { 3726 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 3727 3728 invalidate_reclaim_iterators(memcg); 3729 lru_gen_release_memcg(memcg); 3730 } 3731 3732 static void mem_cgroup_css_free(struct cgroup_subsys_state *css) 3733 { 3734 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 3735 int __maybe_unused i; 3736 3737 #ifdef CONFIG_CGROUP_WRITEBACK 3738 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) 3739 wb_wait_for_completion(&memcg->cgwb_frn[i].done); 3740 #endif 3741 if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket) 3742 static_branch_dec(&memcg_sockets_enabled_key); 3743 3744 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg1_tcpmem_active(memcg)) 3745 static_branch_dec(&memcg_sockets_enabled_key); 3746 3747 if (!cgroup_memory_nobpf) 3748 static_branch_dec(&memcg_bpf_enabled_key); 3749 3750 vmpressure_cleanup(&memcg->vmpressure); 3751 cancel_work_sync(&memcg->high_work); 3752 memcg1_remove_from_trees(memcg); 3753 free_shrinker_info(memcg); 3754 mem_cgroup_free(memcg); 3755 } 3756 3757 /** 3758 * mem_cgroup_css_reset - reset the states of a mem_cgroup 3759 * @css: the target css 3760 * 3761 * Reset the states of the mem_cgroup associated with @css. This is 3762 * invoked when the userland requests disabling on the default hierarchy 3763 * but the memcg is pinned through dependency. The memcg should stop 3764 * applying policies and should revert to the vanilla state as it may be 3765 * made visible again. 3766 * 3767 * The current implementation only resets the essential configurations. 3768 * This needs to be expanded to cover all the visible parts. 
3769 */ 3770 static void mem_cgroup_css_reset(struct cgroup_subsys_state *css) 3771 { 3772 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 3773 3774 page_counter_set_max(&memcg->memory, PAGE_COUNTER_MAX); 3775 page_counter_set_max(&memcg->swap, PAGE_COUNTER_MAX); 3776 #ifdef CONFIG_MEMCG_V1 3777 page_counter_set_max(&memcg->kmem, PAGE_COUNTER_MAX); 3778 page_counter_set_max(&memcg->tcpmem, PAGE_COUNTER_MAX); 3779 #endif 3780 page_counter_set_min(&memcg->memory, 0); 3781 page_counter_set_low(&memcg->memory, 0); 3782 page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX); 3783 memcg1_soft_limit_reset(memcg); 3784 page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX); 3785 memcg_wb_domain_size_changed(memcg); 3786 } 3787 3788 struct aggregate_control { 3789 /* pointer to the aggregated (CPU and subtree aggregated) counters */ 3790 long *aggregate; 3791 /* pointer to the non-hierarchichal (CPU aggregated) counters */ 3792 long *local; 3793 /* pointer to the pending child counters during tree propagation */ 3794 long *pending; 3795 /* pointer to the parent's pending counters, could be NULL */ 3796 long *ppending; 3797 /* pointer to the percpu counters to be aggregated */ 3798 long *cstat; 3799 /* pointer to the percpu counters of the last aggregation*/ 3800 long *cstat_prev; 3801 /* size of the above counters */ 3802 int size; 3803 }; 3804 3805 static void mem_cgroup_stat_aggregate(struct aggregate_control *ac) 3806 { 3807 int i; 3808 long delta, delta_cpu, v; 3809 3810 for (i = 0; i < ac->size; i++) { 3811 /* 3812 * Collect the aggregated propagation counts of groups 3813 * below us. We're in a per-cpu loop here and this is 3814 * a global counter, so the first cycle will get them. 3815 */ 3816 delta = ac->pending[i]; 3817 if (delta) 3818 ac->pending[i] = 0; 3819 3820 /* Add CPU changes on this level since the last flush */ 3821 delta_cpu = 0; 3822 v = READ_ONCE(ac->cstat[i]); 3823 if (v != ac->cstat_prev[i]) { 3824 delta_cpu = v - ac->cstat_prev[i]; 3825 delta += delta_cpu; 3826 ac->cstat_prev[i] = v; 3827 } 3828 3829 /* Aggregate counts on this level and propagate upwards */ 3830 if (delta_cpu) 3831 ac->local[i] += delta_cpu; 3832 3833 if (delta) { 3834 ac->aggregate[i] += delta; 3835 if (ac->ppending) 3836 ac->ppending[i] += delta; 3837 } 3838 } 3839 } 3840 3841 static void mem_cgroup_css_rstat_flush(struct cgroup_subsys_state *css, int cpu) 3842 { 3843 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 3844 struct mem_cgroup *parent = parent_mem_cgroup(memcg); 3845 struct memcg_vmstats_percpu *statc; 3846 struct aggregate_control ac; 3847 int nid; 3848 3849 statc = per_cpu_ptr(memcg->vmstats_percpu, cpu); 3850 3851 ac = (struct aggregate_control) { 3852 .aggregate = memcg->vmstats->state, 3853 .local = memcg->vmstats->state_local, 3854 .pending = memcg->vmstats->state_pending, 3855 .ppending = parent ? parent->vmstats->state_pending : NULL, 3856 .cstat = statc->state, 3857 .cstat_prev = statc->state_prev, 3858 .size = MEMCG_VMSTAT_SIZE, 3859 }; 3860 mem_cgroup_stat_aggregate(&ac); 3861 3862 ac = (struct aggregate_control) { 3863 .aggregate = memcg->vmstats->events, 3864 .local = memcg->vmstats->events_local, 3865 .pending = memcg->vmstats->events_pending, 3866 .ppending = parent ? 
parent->vmstats->events_pending : NULL, 3867 .cstat = statc->events, 3868 .cstat_prev = statc->events_prev, 3869 .size = NR_MEMCG_EVENTS, 3870 }; 3871 mem_cgroup_stat_aggregate(&ac); 3872 3873 for_each_node_state(nid, N_MEMORY) { 3874 struct mem_cgroup_per_node *pn = memcg->nodeinfo[nid]; 3875 struct lruvec_stats *lstats = pn->lruvec_stats; 3876 struct lruvec_stats *plstats = NULL; 3877 struct lruvec_stats_percpu *lstatc; 3878 3879 if (parent) 3880 plstats = parent->nodeinfo[nid]->lruvec_stats; 3881 3882 lstatc = per_cpu_ptr(pn->lruvec_stats_percpu, cpu); 3883 3884 ac = (struct aggregate_control) { 3885 .aggregate = lstats->state, 3886 .local = lstats->state_local, 3887 .pending = lstats->state_pending, 3888 .ppending = plstats ? plstats->state_pending : NULL, 3889 .cstat = lstatc->state, 3890 .cstat_prev = lstatc->state_prev, 3891 .size = NR_MEMCG_NODE_STAT_ITEMS, 3892 }; 3893 mem_cgroup_stat_aggregate(&ac); 3894 3895 } 3896 WRITE_ONCE(statc->stats_updates, 0); 3897 /* We are in a per-cpu loop here, only do the atomic write once */ 3898 if (atomic64_read(&memcg->vmstats->stats_updates)) 3899 atomic64_set(&memcg->vmstats->stats_updates, 0); 3900 } 3901 3902 static void mem_cgroup_fork(struct task_struct *task) 3903 { 3904 /* 3905 * Set the update flag to cause task->objcg to be initialized lazily 3906 * on the first allocation. It can be done without any synchronization 3907 * because it's always performed on the current task, as is 3908 * current_objcg_update(). 3909 */ 3910 task->objcg = (struct obj_cgroup *)CURRENT_OBJCG_UPDATE_FLAG; 3911 } 3912 3913 static void mem_cgroup_exit(struct task_struct *task) 3914 { 3915 struct obj_cgroup *objcg = task->objcg; 3916 3917 objcg = (struct obj_cgroup *) 3918 ((unsigned long)objcg & ~CURRENT_OBJCG_UPDATE_FLAG); 3919 obj_cgroup_put(objcg); 3920 3921 /* 3922 * Some kernel allocations can happen after this point, 3923 * but let's ignore them. It can be done without any synchronization 3924 * because it's always performed on the current task, as is 3925 * current_objcg_update().
3926 */ 3927 task->objcg = NULL; 3928 } 3929 3930 #ifdef CONFIG_LRU_GEN 3931 static void mem_cgroup_lru_gen_attach(struct cgroup_taskset *tset) 3932 { 3933 struct task_struct *task; 3934 struct cgroup_subsys_state *css; 3935 3936 /* find the first leader if there is any */ 3937 cgroup_taskset_for_each_leader(task, css, tset) 3938 break; 3939 3940 if (!task) 3941 return; 3942 3943 task_lock(task); 3944 if (task->mm && READ_ONCE(task->mm->owner) == task) 3945 lru_gen_migrate_mm(task->mm); 3946 task_unlock(task); 3947 } 3948 #else 3949 static void mem_cgroup_lru_gen_attach(struct cgroup_taskset *tset) {} 3950 #endif /* CONFIG_LRU_GEN */ 3951 3952 static void mem_cgroup_kmem_attach(struct cgroup_taskset *tset) 3953 { 3954 struct task_struct *task; 3955 struct cgroup_subsys_state *css; 3956 3957 cgroup_taskset_for_each(task, css, tset) { 3958 /* atomically set the update bit */ 3959 set_bit(CURRENT_OBJCG_UPDATE_BIT, (unsigned long *)&task->objcg); 3960 } 3961 } 3962 3963 static void mem_cgroup_attach(struct cgroup_taskset *tset) 3964 { 3965 mem_cgroup_lru_gen_attach(tset); 3966 mem_cgroup_kmem_attach(tset); 3967 } 3968 3969 static int seq_puts_memcg_tunable(struct seq_file *m, unsigned long value) 3970 { 3971 if (value == PAGE_COUNTER_MAX) 3972 seq_puts(m, "max\n"); 3973 else 3974 seq_printf(m, "%llu\n", (u64)value * PAGE_SIZE); 3975 3976 return 0; 3977 } 3978 3979 static u64 memory_current_read(struct cgroup_subsys_state *css, 3980 struct cftype *cft) 3981 { 3982 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 3983 3984 return (u64)page_counter_read(&memcg->memory) * PAGE_SIZE; 3985 } 3986 3987 #define OFP_PEAK_UNSET (((-1UL))) 3988 3989 static int peak_show(struct seq_file *sf, void *v, struct page_counter *pc) 3990 { 3991 struct cgroup_of_peak *ofp = of_peak(sf->private); 3992 u64 fd_peak = READ_ONCE(ofp->value), peak; 3993 3994 /* User wants global or local peak? 
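* If no write has gone through this file descriptor, report the cgroup-wide watermark; otherwise report the larger of the value recorded at the last write on this fd and the counter's local watermark, which such writes reset.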
*/ 3995 if (fd_peak == OFP_PEAK_UNSET) 3996 peak = pc->watermark; 3997 else 3998 peak = max(fd_peak, READ_ONCE(pc->local_watermark)); 3999 4000 seq_printf(sf, "%llu\n", peak * PAGE_SIZE); 4001 return 0; 4002 } 4003 4004 static int memory_peak_show(struct seq_file *sf, void *v) 4005 { 4006 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(sf)); 4007 4008 return peak_show(sf, v, &memcg->memory); 4009 } 4010 4011 static int peak_open(struct kernfs_open_file *of) 4012 { 4013 struct cgroup_of_peak *ofp = of_peak(of); 4014 4015 ofp->value = OFP_PEAK_UNSET; 4016 return 0; 4017 } 4018 4019 static void peak_release(struct kernfs_open_file *of) 4020 { 4021 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 4022 struct cgroup_of_peak *ofp = of_peak(of); 4023 4024 if (ofp->value == OFP_PEAK_UNSET) { 4025 /* fast path (no writes on this fd) */ 4026 return; 4027 } 4028 spin_lock(&memcg->peaks_lock); 4029 list_del(&ofp->list); 4030 spin_unlock(&memcg->peaks_lock); 4031 } 4032 4033 static ssize_t peak_write(struct kernfs_open_file *of, char *buf, size_t nbytes, 4034 loff_t off, struct page_counter *pc, 4035 struct list_head *watchers) 4036 { 4037 unsigned long usage; 4038 struct cgroup_of_peak *peer_ctx; 4039 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 4040 struct cgroup_of_peak *ofp = of_peak(of); 4041 4042 spin_lock(&memcg->peaks_lock); 4043 4044 usage = page_counter_read(pc); 4045 WRITE_ONCE(pc->local_watermark, usage); 4046 4047 list_for_each_entry(peer_ctx, watchers, list) 4048 if (usage > peer_ctx->value) 4049 WRITE_ONCE(peer_ctx->value, usage); 4050 4051 /* initial write, register watcher */ 4052 if (ofp->value == OFP_PEAK_UNSET) 4053 list_add(&ofp->list, watchers); 4054 4055 WRITE_ONCE(ofp->value, usage); 4056 spin_unlock(&memcg->peaks_lock); 4057 4058 return nbytes; 4059 } 4060 4061 static ssize_t memory_peak_write(struct kernfs_open_file *of, char *buf, 4062 size_t nbytes, loff_t off) 4063 { 4064 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 4065 4066 return peak_write(of, buf, nbytes, off, &memcg->memory, 4067 &memcg->memory_peaks); 4068 } 4069 4070 #undef OFP_PEAK_UNSET 4071 4072 static int memory_min_show(struct seq_file *m, void *v) 4073 { 4074 return seq_puts_memcg_tunable(m, 4075 READ_ONCE(mem_cgroup_from_seq(m)->memory.min)); 4076 } 4077 4078 static ssize_t memory_min_write(struct kernfs_open_file *of, 4079 char *buf, size_t nbytes, loff_t off) 4080 { 4081 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 4082 unsigned long min; 4083 int err; 4084 4085 buf = strstrip(buf); 4086 err = page_counter_memparse(buf, "max", &min); 4087 if (err) 4088 return err; 4089 4090 page_counter_set_min(&memcg->memory, min); 4091 4092 return nbytes; 4093 } 4094 4095 static int memory_low_show(struct seq_file *m, void *v) 4096 { 4097 return seq_puts_memcg_tunable(m, 4098 READ_ONCE(mem_cgroup_from_seq(m)->memory.low)); 4099 } 4100 4101 static ssize_t memory_low_write(struct kernfs_open_file *of, 4102 char *buf, size_t nbytes, loff_t off) 4103 { 4104 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 4105 unsigned long low; 4106 int err; 4107 4108 buf = strstrip(buf); 4109 err = page_counter_memparse(buf, "max", &low); 4110 if (err) 4111 return err; 4112 4113 page_counter_set_low(&memcg->memory, low); 4114 4115 return nbytes; 4116 } 4117 4118 static int memory_high_show(struct seq_file *m, void *v) 4119 { 4120 return seq_puts_memcg_tunable(m, 4121 READ_ONCE(mem_cgroup_from_seq(m)->memory.high)); 4122 } 4123 4124 static ssize_t 
memory_high_write(struct kernfs_open_file *of, 4125 char *buf, size_t nbytes, loff_t off) 4126 { 4127 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 4128 unsigned int nr_retries = MAX_RECLAIM_RETRIES; 4129 bool drained = false; 4130 unsigned long high; 4131 int err; 4132 4133 buf = strstrip(buf); 4134 err = page_counter_memparse(buf, "max", &high); 4135 if (err) 4136 return err; 4137 4138 page_counter_set_high(&memcg->memory, high); 4139 4140 for (;;) { 4141 unsigned long nr_pages = page_counter_read(&memcg->memory); 4142 unsigned long reclaimed; 4143 4144 if (nr_pages <= high) 4145 break; 4146 4147 if (signal_pending(current)) 4148 break; 4149 4150 if (!drained) { 4151 drain_all_stock(memcg); 4152 drained = true; 4153 continue; 4154 } 4155 4156 reclaimed = try_to_free_mem_cgroup_pages(memcg, nr_pages - high, 4157 GFP_KERNEL, MEMCG_RECLAIM_MAY_SWAP, NULL); 4158 4159 if (!reclaimed && !nr_retries--) 4160 break; 4161 } 4162 4163 memcg_wb_domain_size_changed(memcg); 4164 return nbytes; 4165 } 4166 4167 static int memory_max_show(struct seq_file *m, void *v) 4168 { 4169 return seq_puts_memcg_tunable(m, 4170 READ_ONCE(mem_cgroup_from_seq(m)->memory.max)); 4171 } 4172 4173 static ssize_t memory_max_write(struct kernfs_open_file *of, 4174 char *buf, size_t nbytes, loff_t off) 4175 { 4176 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 4177 unsigned int nr_reclaims = MAX_RECLAIM_RETRIES; 4178 bool drained = false; 4179 unsigned long max; 4180 int err; 4181 4182 buf = strstrip(buf); 4183 err = page_counter_memparse(buf, "max", &max); 4184 if (err) 4185 return err; 4186 4187 xchg(&memcg->memory.max, max); 4188 4189 for (;;) { 4190 unsigned long nr_pages = page_counter_read(&memcg->memory); 4191 4192 if (nr_pages <= max) 4193 break; 4194 4195 if (signal_pending(current)) 4196 break; 4197 4198 if (!drained) { 4199 drain_all_stock(memcg); 4200 drained = true; 4201 continue; 4202 } 4203 4204 if (nr_reclaims) { 4205 if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max, 4206 GFP_KERNEL, MEMCG_RECLAIM_MAY_SWAP, NULL)) 4207 nr_reclaims--; 4208 continue; 4209 } 4210 4211 memcg_memory_event(memcg, MEMCG_OOM); 4212 if (!mem_cgroup_out_of_memory(memcg, GFP_KERNEL, 0)) 4213 break; 4214 cond_resched(); 4215 } 4216 4217 memcg_wb_domain_size_changed(memcg); 4218 return nbytes; 4219 } 4220 4221 /* 4222 * Note: don't forget to update the 'samples/cgroup/memcg_event_listener' 4223 * if any new events become available. 
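* Also note: memory.events below aggregates events raised anywhere in the cgroup's subtree, while memory.events.local only counts events that happened in this cgroup itself.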
4224 */ 4225 static void __memory_events_show(struct seq_file *m, atomic_long_t *events) 4226 { 4227 seq_printf(m, "low %lu\n", atomic_long_read(&events[MEMCG_LOW])); 4228 seq_printf(m, "high %lu\n", atomic_long_read(&events[MEMCG_HIGH])); 4229 seq_printf(m, "max %lu\n", atomic_long_read(&events[MEMCG_MAX])); 4230 seq_printf(m, "oom %lu\n", atomic_long_read(&events[MEMCG_OOM])); 4231 seq_printf(m, "oom_kill %lu\n", 4232 atomic_long_read(&events[MEMCG_OOM_KILL])); 4233 seq_printf(m, "oom_group_kill %lu\n", 4234 atomic_long_read(&events[MEMCG_OOM_GROUP_KILL])); 4235 } 4236 4237 static int memory_events_show(struct seq_file *m, void *v) 4238 { 4239 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 4240 4241 __memory_events_show(m, memcg->memory_events); 4242 return 0; 4243 } 4244 4245 static int memory_events_local_show(struct seq_file *m, void *v) 4246 { 4247 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 4248 4249 __memory_events_show(m, memcg->memory_events_local); 4250 return 0; 4251 } 4252 4253 int memory_stat_show(struct seq_file *m, void *v) 4254 { 4255 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 4256 char *buf = kmalloc(SEQ_BUF_SIZE, GFP_KERNEL); 4257 struct seq_buf s; 4258 4259 if (!buf) 4260 return -ENOMEM; 4261 seq_buf_init(&s, buf, SEQ_BUF_SIZE); 4262 memory_stat_format(memcg, &s); 4263 seq_puts(m, buf); 4264 kfree(buf); 4265 return 0; 4266 } 4267 4268 #ifdef CONFIG_NUMA 4269 static inline unsigned long lruvec_page_state_output(struct lruvec *lruvec, 4270 int item) 4271 { 4272 return lruvec_page_state(lruvec, item) * 4273 memcg_page_state_output_unit(item); 4274 } 4275 4276 static int memory_numa_stat_show(struct seq_file *m, void *v) 4277 { 4278 int i; 4279 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 4280 4281 mem_cgroup_flush_stats(memcg); 4282 4283 for (i = 0; i < ARRAY_SIZE(memory_stats); i++) { 4284 int nid; 4285 4286 if (memory_stats[i].idx >= NR_VM_NODE_STAT_ITEMS) 4287 continue; 4288 4289 seq_printf(m, "%s", memory_stats[i].name); 4290 for_each_node_state(nid, N_MEMORY) { 4291 u64 size; 4292 struct lruvec *lruvec; 4293 4294 lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid)); 4295 size = lruvec_page_state_output(lruvec, 4296 memory_stats[i].idx); 4297 seq_printf(m, " N%d=%llu", nid, size); 4298 } 4299 seq_putc(m, '\n'); 4300 } 4301 4302 return 0; 4303 } 4304 #endif 4305 4306 static int memory_oom_group_show(struct seq_file *m, void *v) 4307 { 4308 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 4309 4310 seq_printf(m, "%d\n", READ_ONCE(memcg->oom_group)); 4311 4312 return 0; 4313 } 4314 4315 static ssize_t memory_oom_group_write(struct kernfs_open_file *of, 4316 char *buf, size_t nbytes, loff_t off) 4317 { 4318 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 4319 int ret, oom_group; 4320 4321 buf = strstrip(buf); 4322 if (!buf) 4323 return -EINVAL; 4324 4325 ret = kstrtoint(buf, 0, &oom_group); 4326 if (ret) 4327 return ret; 4328 4329 if (oom_group != 0 && oom_group != 1) 4330 return -EINVAL; 4331 4332 WRITE_ONCE(memcg->oom_group, oom_group); 4333 4334 return nbytes; 4335 } 4336 4337 enum { 4338 MEMORY_RECLAIM_SWAPPINESS = 0, 4339 MEMORY_RECLAIM_NULL, 4340 }; 4341 4342 static const match_table_t tokens = { 4343 { MEMORY_RECLAIM_SWAPPINESS, "swappiness=%d"}, 4344 { MEMORY_RECLAIM_NULL, NULL }, 4345 }; 4346 4347 static ssize_t memory_reclaim(struct kernfs_open_file *of, char *buf, 4348 size_t nbytes, loff_t off) 4349 { 4350 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 4351 unsigned int nr_retries = MAX_RECLAIM_RETRIES; 4352 
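/* Input has the form "<size>[ swappiness=<n>]"; e.g. writing "1G swappiness=60" to memory.reclaim (illustrative) requests proactive reclaim of 1GiB with a swappiness of 60. */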
unsigned long nr_to_reclaim, nr_reclaimed = 0; 4353 int swappiness = -1; 4354 unsigned int reclaim_options; 4355 char *old_buf, *start; 4356 substring_t args[MAX_OPT_ARGS]; 4357 4358 buf = strstrip(buf); 4359 4360 old_buf = buf; 4361 nr_to_reclaim = memparse(buf, &buf) / PAGE_SIZE; 4362 if (buf == old_buf) 4363 return -EINVAL; 4364 4365 buf = strstrip(buf); 4366 4367 while ((start = strsep(&buf, " ")) != NULL) { 4368 if (!strlen(start)) 4369 continue; 4370 switch (match_token(start, tokens, args)) { 4371 case MEMORY_RECLAIM_SWAPPINESS: 4372 if (match_int(&args[0], &swappiness)) 4373 return -EINVAL; 4374 if (swappiness < MIN_SWAPPINESS || swappiness > MAX_SWAPPINESS) 4375 return -EINVAL; 4376 break; 4377 default: 4378 return -EINVAL; 4379 } 4380 } 4381 4382 reclaim_options = MEMCG_RECLAIM_MAY_SWAP | MEMCG_RECLAIM_PROACTIVE; 4383 while (nr_reclaimed < nr_to_reclaim) { 4384 /* Will converge on zero, but reclaim enforces a minimum */ 4385 unsigned long batch_size = (nr_to_reclaim - nr_reclaimed) / 4; 4386 unsigned long reclaimed; 4387 4388 if (signal_pending(current)) 4389 return -EINTR; 4390 4391 /* 4392 * This is the final attempt, drain percpu lru caches in the 4393 * hope of introducing more evictable pages for 4394 * try_to_free_mem_cgroup_pages(). 4395 */ 4396 if (!nr_retries) 4397 lru_add_drain_all(); 4398 4399 reclaimed = try_to_free_mem_cgroup_pages(memcg, 4400 batch_size, GFP_KERNEL, 4401 reclaim_options, 4402 swappiness == -1 ? NULL : &swappiness); 4403 4404 if (!reclaimed && !nr_retries--) 4405 return -EAGAIN; 4406 4407 nr_reclaimed += reclaimed; 4408 } 4409 4410 return nbytes; 4411 } 4412 4413 static struct cftype memory_files[] = { 4414 { 4415 .name = "current", 4416 .flags = CFTYPE_NOT_ON_ROOT, 4417 .read_u64 = memory_current_read, 4418 }, 4419 { 4420 .name = "peak", 4421 .flags = CFTYPE_NOT_ON_ROOT, 4422 .open = peak_open, 4423 .release = peak_release, 4424 .seq_show = memory_peak_show, 4425 .write = memory_peak_write, 4426 }, 4427 { 4428 .name = "min", 4429 .flags = CFTYPE_NOT_ON_ROOT, 4430 .seq_show = memory_min_show, 4431 .write = memory_min_write, 4432 }, 4433 { 4434 .name = "low", 4435 .flags = CFTYPE_NOT_ON_ROOT, 4436 .seq_show = memory_low_show, 4437 .write = memory_low_write, 4438 }, 4439 { 4440 .name = "high", 4441 .flags = CFTYPE_NOT_ON_ROOT, 4442 .seq_show = memory_high_show, 4443 .write = memory_high_write, 4444 }, 4445 { 4446 .name = "max", 4447 .flags = CFTYPE_NOT_ON_ROOT, 4448 .seq_show = memory_max_show, 4449 .write = memory_max_write, 4450 }, 4451 { 4452 .name = "events", 4453 .flags = CFTYPE_NOT_ON_ROOT, 4454 .file_offset = offsetof(struct mem_cgroup, events_file), 4455 .seq_show = memory_events_show, 4456 }, 4457 { 4458 .name = "events.local", 4459 .flags = CFTYPE_NOT_ON_ROOT, 4460 .file_offset = offsetof(struct mem_cgroup, events_local_file), 4461 .seq_show = memory_events_local_show, 4462 }, 4463 { 4464 .name = "stat", 4465 .seq_show = memory_stat_show, 4466 }, 4467 #ifdef CONFIG_NUMA 4468 { 4469 .name = "numa_stat", 4470 .seq_show = memory_numa_stat_show, 4471 }, 4472 #endif 4473 { 4474 .name = "oom.group", 4475 .flags = CFTYPE_NOT_ON_ROOT | CFTYPE_NS_DELEGATABLE, 4476 .seq_show = memory_oom_group_show, 4477 .write = memory_oom_group_write, 4478 }, 4479 { 4480 .name = "reclaim", 4481 .flags = CFTYPE_NS_DELEGATABLE, 4482 .write = memory_reclaim, 4483 }, 4484 { } /* terminate */ 4485 }; 4486 4487 struct cgroup_subsys memory_cgrp_subsys = { 4488 .css_alloc = mem_cgroup_css_alloc, 4489 .css_online = mem_cgroup_css_online, 4490 .css_offline = 
mem_cgroup_css_offline, 4491 .css_released = mem_cgroup_css_released, 4492 .css_free = mem_cgroup_css_free, 4493 .css_reset = mem_cgroup_css_reset, 4494 .css_rstat_flush = mem_cgroup_css_rstat_flush, 4495 .attach = mem_cgroup_attach, 4496 .fork = mem_cgroup_fork, 4497 .exit = mem_cgroup_exit, 4498 .dfl_cftypes = memory_files, 4499 #ifdef CONFIG_MEMCG_V1 4500 .legacy_cftypes = mem_cgroup_legacy_files, 4501 #endif 4502 .early_init = 0, 4503 }; 4504 4505 /** 4506 * mem_cgroup_calculate_protection - check if memory consumption is in the normal range 4507 * @root: the top ancestor of the sub-tree being checked 4508 * @memcg: the memory cgroup to check 4509 * 4510 * WARNING: This function is not stateless! It can only be used as part 4511 * of a top-down tree iteration, not for isolated queries. 4512 */ 4513 void mem_cgroup_calculate_protection(struct mem_cgroup *root, 4514 struct mem_cgroup *memcg) 4515 { 4516 bool recursive_protection = 4517 cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_RECURSIVE_PROT; 4518 4519 if (mem_cgroup_disabled()) 4520 return; 4521 4522 if (!root) 4523 root = root_mem_cgroup; 4524 4525 page_counter_calculate_protection(&root->memory, &memcg->memory, recursive_protection); 4526 } 4527 4528 static int charge_memcg(struct folio *folio, struct mem_cgroup *memcg, 4529 gfp_t gfp) 4530 { 4531 int ret; 4532 4533 ret = try_charge(memcg, gfp, folio_nr_pages(folio)); 4534 if (ret) 4535 goto out; 4536 4537 css_get(&memcg->css); 4538 commit_charge(folio, memcg); 4539 memcg1_commit_charge(folio, memcg); 4540 out: 4541 return ret; 4542 } 4543 4544 int __mem_cgroup_charge(struct folio *folio, struct mm_struct *mm, gfp_t gfp) 4545 { 4546 struct mem_cgroup *memcg; 4547 int ret; 4548 4549 memcg = get_mem_cgroup_from_mm(mm); 4550 ret = charge_memcg(folio, memcg, gfp); 4551 css_put(&memcg->css); 4552 4553 return ret; 4554 } 4555 4556 /** 4557 * mem_cgroup_charge_hugetlb - charge the memcg for a hugetlb folio 4558 * @folio: folio being charged 4559 * @gfp: reclaim mode 4560 * 4561 * This function is called when allocating a huge page folio, after the page has 4562 * already been obtained and charged to the appropriate hugetlb cgroup 4563 * controller (if it is enabled). 4564 * 4565 * Returns -ENOMEM if the memcg is already full. 4566 * Returns 0 if either the charge was successful, or if we skip the charging. 4567 */ 4568 int mem_cgroup_charge_hugetlb(struct folio *folio, gfp_t gfp) 4569 { 4570 struct mem_cgroup *memcg = get_mem_cgroup_from_current(); 4571 int ret = 0; 4572 4573 /* 4574 * Even if memcg does not account for hugetlb, we still want to update 4575 * system-level stats via lruvec_stat_mod_folio. Return 0, and skip 4576 * charging the memcg. 4577 */ 4578 if (mem_cgroup_disabled() || !memcg_accounts_hugetlb() || 4579 !memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys)) 4580 goto out; 4581 4582 if (charge_memcg(folio, memcg, gfp)) 4583 ret = -ENOMEM; 4584 4585 out: 4586 mem_cgroup_put(memcg); 4587 return ret; 4588 } 4589 4590 /** 4591 * mem_cgroup_swapin_charge_folio - Charge a newly allocated folio for swapin. 4592 * @folio: folio to charge. 4593 * @mm: mm context of the victim 4594 * @gfp: reclaim mode 4595 * @entry: swap entry for which the folio is allocated 4596 * 4597 * This function charges a folio allocated for swapin. Please call this before 4598 * adding the folio to the swapcache. 4599 * 4600 * Returns 0 on success. Otherwise, an error code is returned.
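* If the memcg recorded for @entry no longer exists or cannot be pinned online, the charge falls back to the memcg of @mm.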
4601 */ 4602 int mem_cgroup_swapin_charge_folio(struct folio *folio, struct mm_struct *mm, 4603 gfp_t gfp, swp_entry_t entry) 4604 { 4605 struct mem_cgroup *memcg; 4606 unsigned short id; 4607 int ret; 4608 4609 if (mem_cgroup_disabled()) 4610 return 0; 4611 4612 id = lookup_swap_cgroup_id(entry); 4613 rcu_read_lock(); 4614 memcg = mem_cgroup_from_id(id); 4615 if (!memcg || !css_tryget_online(&memcg->css)) 4616 memcg = get_mem_cgroup_from_mm(mm); 4617 rcu_read_unlock(); 4618 4619 ret = charge_memcg(folio, memcg, gfp); 4620 4621 css_put(&memcg->css); 4622 return ret; 4623 } 4624 4625 struct uncharge_gather { 4626 struct mem_cgroup *memcg; 4627 unsigned long nr_memory; 4628 unsigned long pgpgout; 4629 unsigned long nr_kmem; 4630 int nid; 4631 }; 4632 4633 static inline void uncharge_gather_clear(struct uncharge_gather *ug) 4634 { 4635 memset(ug, 0, sizeof(*ug)); 4636 } 4637 4638 static void uncharge_batch(const struct uncharge_gather *ug) 4639 { 4640 if (ug->nr_memory) { 4641 page_counter_uncharge(&ug->memcg->memory, ug->nr_memory); 4642 if (do_memsw_account()) 4643 page_counter_uncharge(&ug->memcg->memsw, ug->nr_memory); 4644 if (ug->nr_kmem) { 4645 mod_memcg_state(ug->memcg, MEMCG_KMEM, -ug->nr_kmem); 4646 memcg1_account_kmem(ug->memcg, -ug->nr_kmem); 4647 } 4648 memcg1_oom_recover(ug->memcg); 4649 } 4650 4651 memcg1_uncharge_batch(ug->memcg, ug->pgpgout, ug->nr_memory, ug->nid); 4652 4653 /* drop reference from uncharge_folio */ 4654 css_put(&ug->memcg->css); 4655 } 4656 4657 static void uncharge_folio(struct folio *folio, struct uncharge_gather *ug) 4658 { 4659 long nr_pages; 4660 struct mem_cgroup *memcg; 4661 struct obj_cgroup *objcg; 4662 4663 VM_BUG_ON_FOLIO(folio_test_lru(folio), folio); 4664 4665 /* 4666 * Nobody should be changing or seriously looking at 4667 * folio memcg or objcg at this point, we have fully 4668 * exclusive access to the folio. 4669 */ 4670 if (folio_memcg_kmem(folio)) { 4671 objcg = __folio_objcg(folio); 4672 /* 4673 * This get matches the put at the end of the function and 4674 * kmem pages do not hold memcg references anymore. 
4675 */ 4676 memcg = get_mem_cgroup_from_objcg(objcg); 4677 } else { 4678 memcg = __folio_memcg(folio); 4679 } 4680 4681 if (!memcg) 4682 return; 4683 4684 if (ug->memcg != memcg) { 4685 if (ug->memcg) { 4686 uncharge_batch(ug); 4687 uncharge_gather_clear(ug); 4688 } 4689 ug->memcg = memcg; 4690 ug->nid = folio_nid(folio); 4691 4692 /* pairs with css_put in uncharge_batch */ 4693 css_get(&memcg->css); 4694 } 4695 4696 nr_pages = folio_nr_pages(folio); 4697 4698 if (folio_memcg_kmem(folio)) { 4699 ug->nr_memory += nr_pages; 4700 ug->nr_kmem += nr_pages; 4701 4702 folio->memcg_data = 0; 4703 obj_cgroup_put(objcg); 4704 } else { 4705 /* LRU pages aren't accounted at the root level */ 4706 if (!mem_cgroup_is_root(memcg)) 4707 ug->nr_memory += nr_pages; 4708 ug->pgpgout++; 4709 4710 WARN_ON_ONCE(folio_unqueue_deferred_split(folio)); 4711 folio->memcg_data = 0; 4712 } 4713 4714 css_put(&memcg->css); 4715 } 4716 4717 void __mem_cgroup_uncharge(struct folio *folio) 4718 { 4719 struct uncharge_gather ug; 4720 4721 /* Don't touch folio->lru of any random page, pre-check: */ 4722 if (!folio_memcg_charged(folio)) 4723 return; 4724 4725 uncharge_gather_clear(&ug); 4726 uncharge_folio(folio, &ug); 4727 uncharge_batch(&ug); 4728 } 4729 4730 void __mem_cgroup_uncharge_folios(struct folio_batch *folios) 4731 { 4732 struct uncharge_gather ug; 4733 unsigned int i; 4734 4735 uncharge_gather_clear(&ug); 4736 for (i = 0; i < folios->nr; i++) 4737 uncharge_folio(folios->folios[i], &ug); 4738 if (ug.memcg) 4739 uncharge_batch(&ug); 4740 } 4741 4742 /** 4743 * mem_cgroup_replace_folio - Charge a folio's replacement. 4744 * @old: Currently circulating folio. 4745 * @new: Replacement folio. 4746 * 4747 * Charge @new as a replacement folio for @old. @old will 4748 * be uncharged upon free. 4749 * 4750 * Both folios must be locked, @new->mapping must be set up. 4751 */ 4752 void mem_cgroup_replace_folio(struct folio *old, struct folio *new) 4753 { 4754 struct mem_cgroup *memcg; 4755 long nr_pages = folio_nr_pages(new); 4756 4757 VM_BUG_ON_FOLIO(!folio_test_locked(old), old); 4758 VM_BUG_ON_FOLIO(!folio_test_locked(new), new); 4759 VM_BUG_ON_FOLIO(folio_test_anon(old) != folio_test_anon(new), new); 4760 VM_BUG_ON_FOLIO(folio_nr_pages(old) != nr_pages, new); 4761 4762 if (mem_cgroup_disabled()) 4763 return; 4764 4765 /* Page cache replacement: new folio already charged? */ 4766 if (folio_memcg_charged(new)) 4767 return; 4768 4769 memcg = folio_memcg(old); 4770 VM_WARN_ON_ONCE_FOLIO(!memcg, old); 4771 if (!memcg) 4772 return; 4773 4774 /* Force-charge the new page. The old one will be freed soon */ 4775 if (!mem_cgroup_is_root(memcg)) { 4776 page_counter_charge(&memcg->memory, nr_pages); 4777 if (do_memsw_account()) 4778 page_counter_charge(&memcg->memsw, nr_pages); 4779 } 4780 4781 css_get(&memcg->css); 4782 commit_charge(new, memcg); 4783 memcg1_commit_charge(new, memcg); 4784 } 4785 4786 /** 4787 * mem_cgroup_migrate - Transfer the memcg data from the old to the new folio. 4788 * @old: Currently circulating folio. 4789 * @new: Replacement folio. 4790 * 4791 * Transfer the memcg data from the old folio to the new folio for migration. 4792 * The old folio's data info will be cleared. Note that the memory counters 4793 * will remain unchanged throughout the process. 4794 * 4795 * Both folios must be locked, @new->mapping must be set up. 
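* Unlike mem_cgroup_replace_folio() above, no page counters are charged here; only the memcg binding (and the css reference it carries) moves over to @new.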
4796 */ 4797 void mem_cgroup_migrate(struct folio *old, struct folio *new) 4798 { 4799 struct mem_cgroup *memcg; 4800 4801 VM_BUG_ON_FOLIO(!folio_test_locked(old), old); 4802 VM_BUG_ON_FOLIO(!folio_test_locked(new), new); 4803 VM_BUG_ON_FOLIO(folio_test_anon(old) != folio_test_anon(new), new); 4804 VM_BUG_ON_FOLIO(folio_nr_pages(old) != folio_nr_pages(new), new); 4805 VM_BUG_ON_FOLIO(folio_test_lru(old), old); 4806 4807 if (mem_cgroup_disabled()) 4808 return; 4809 4810 memcg = folio_memcg(old); 4811 /* 4812 * Note that it is normal to see !memcg for a hugetlb folio. 4813 * E.g., it could have been allocated when memory_hugetlb_accounting 4814 * was not selected. 4815 */ 4816 VM_WARN_ON_ONCE_FOLIO(!folio_test_hugetlb(old) && !memcg, old); 4817 if (!memcg) 4818 return; 4819 4820 /* Transfer the charge and the css ref */ 4821 commit_charge(new, memcg); 4822 4823 /* Warning should never happen, so don't worry about refcount non-0 */ 4824 WARN_ON_ONCE(folio_unqueue_deferred_split(old)); 4825 old->memcg_data = 0; 4826 } 4827 4828 DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key); 4829 EXPORT_SYMBOL(memcg_sockets_enabled_key); 4830 4831 void mem_cgroup_sk_alloc(struct sock *sk) 4832 { 4833 struct mem_cgroup *memcg; 4834 4835 if (!mem_cgroup_sockets_enabled) 4836 return; 4837 4838 /* Do not associate the sock with unrelated interrupted task's memcg. */ 4839 if (!in_task()) 4840 return; 4841 4842 rcu_read_lock(); 4843 memcg = mem_cgroup_from_task(current); 4844 if (mem_cgroup_is_root(memcg)) 4845 goto out; 4846 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg1_tcpmem_active(memcg)) 4847 goto out; 4848 if (css_tryget(&memcg->css)) 4849 sk->sk_memcg = memcg; 4850 out: 4851 rcu_read_unlock(); 4852 } 4853 4854 void mem_cgroup_sk_free(struct sock *sk) 4855 { 4856 if (sk->sk_memcg) 4857 css_put(&sk->sk_memcg->css); 4858 } 4859 4860 /** 4861 * mem_cgroup_charge_skmem - charge socket memory 4862 * @memcg: memcg to charge 4863 * @nr_pages: number of pages to charge 4864 * @gfp_mask: reclaim mode 4865 * 4866 * Charges @nr_pages to @memcg. Returns %true if the charge fits within 4867 * @memcg's configured limit, %false if it doesn't. 4868 */ 4869 bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages, 4870 gfp_t gfp_mask) 4871 { 4872 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) 4873 return memcg1_charge_skmem(memcg, nr_pages, gfp_mask); 4874 4875 if (try_charge(memcg, gfp_mask, nr_pages) == 0) { 4876 mod_memcg_state(memcg, MEMCG_SOCK, nr_pages); 4877 return true; 4878 } 4879 4880 return false; 4881 } 4882 4883 /** 4884 * mem_cgroup_uncharge_skmem - uncharge socket memory 4885 * @memcg: memcg to uncharge 4886 * @nr_pages: number of pages to uncharge 4887 */ 4888 void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages) 4889 { 4890 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) { 4891 memcg1_uncharge_skmem(memcg, nr_pages); 4892 return; 4893 } 4894 4895 mod_memcg_state(memcg, MEMCG_SOCK, -nr_pages); 4896 4897 refill_stock(memcg, nr_pages); 4898 } 4899 4900 static int __init cgroup_memory(char *s) 4901 { 4902 char *token; 4903 4904 while ((token = strsep(&s, ",")) != NULL) { 4905 if (!*token) 4906 continue; 4907 if (!strcmp(token, "nosocket")) 4908 cgroup_memory_nosocket = true; 4909 if (!strcmp(token, "nokmem")) 4910 cgroup_memory_nokmem = true; 4911 if (!strcmp(token, "nobpf")) 4912 cgroup_memory_nobpf = true; 4913 } 4914 return 1; 4915 } 4916 __setup("cgroup.memory=", cgroup_memory); 4917 4918 /* 4919 * subsys_initcall() for memory controller.
4920 * 4921 * Some parts like memcg_hotplug_cpu_dead() have to be initialized from this 4922 * context because of lock dependencies (cgroup_lock -> cpu hotplug) but 4923 * basically everything that doesn't depend on a specific mem_cgroup structure 4924 * should be initialized from here. 4925 */ 4926 static int __init mem_cgroup_init(void) 4927 { 4928 int cpu; 4929 4930 /* 4931 * Currently s32 type (can refer to struct batched_lruvec_stat) is 4932 * used for per-memcg-per-cpu caching of per-node statistics. In order 4933 * to work fine, we should make sure that the overfill threshold can't 4934 * exceed S32_MAX / PAGE_SIZE. 4935 */ 4936 BUILD_BUG_ON(MEMCG_CHARGE_BATCH > S32_MAX / PAGE_SIZE); 4937 4938 cpuhp_setup_state_nocalls(CPUHP_MM_MEMCQ_DEAD, "mm/memctrl:dead", NULL, 4939 memcg_hotplug_cpu_dead); 4940 4941 for_each_possible_cpu(cpu) 4942 INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work, 4943 drain_local_stock); 4944 4945 return 0; 4946 } 4947 subsys_initcall(mem_cgroup_init); 4948 4949 #ifdef CONFIG_SWAP 4950 /** 4951 * __mem_cgroup_try_charge_swap - try charging swap space for a folio 4952 * @folio: folio being added to swap 4953 * @entry: swap entry to charge 4954 * 4955 * Try to charge @folio's memcg for the swap space at @entry. 4956 * 4957 * Returns 0 on success, -ENOMEM on failure. 4958 */ 4959 int __mem_cgroup_try_charge_swap(struct folio *folio, swp_entry_t entry) 4960 { 4961 unsigned int nr_pages = folio_nr_pages(folio); 4962 struct page_counter *counter; 4963 struct mem_cgroup *memcg; 4964 4965 if (do_memsw_account()) 4966 return 0; 4967 4968 memcg = folio_memcg(folio); 4969 4970 VM_WARN_ON_ONCE_FOLIO(!memcg, folio); 4971 if (!memcg) 4972 return 0; 4973 4974 if (!entry.val) { 4975 memcg_memory_event(memcg, MEMCG_SWAP_FAIL); 4976 return 0; 4977 } 4978 4979 memcg = mem_cgroup_id_get_online(memcg); 4980 4981 if (!mem_cgroup_is_root(memcg) && 4982 !page_counter_try_charge(&memcg->swap, nr_pages, &counter)) { 4983 memcg_memory_event(memcg, MEMCG_SWAP_MAX); 4984 memcg_memory_event(memcg, MEMCG_SWAP_FAIL); 4985 mem_cgroup_id_put(memcg); 4986 return -ENOMEM; 4987 } 4988 4989 /* Get references for the tail pages, too */ 4990 if (nr_pages > 1) 4991 mem_cgroup_id_get_many(memcg, nr_pages - 1); 4992 mod_memcg_state(memcg, MEMCG_SWAP, nr_pages); 4993 4994 swap_cgroup_record(folio, mem_cgroup_id(memcg), entry); 4995 4996 return 0; 4997 } 4998 4999 /** 5000 * __mem_cgroup_uncharge_swap - uncharge swap space 5001 * @entry: swap entry to uncharge 5002 * @nr_pages: the amount of swap space to uncharge 5003 */ 5004 void __mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages) 5005 { 5006 struct mem_cgroup *memcg; 5007 unsigned short id; 5008 5009 id = swap_cgroup_clear(entry, nr_pages); 5010 rcu_read_lock(); 5011 memcg = mem_cgroup_from_id(id); 5012 if (memcg) { 5013 if (!mem_cgroup_is_root(memcg)) { 5014 if (do_memsw_account()) 5015 page_counter_uncharge(&memcg->memsw, nr_pages); 5016 else 5017 page_counter_uncharge(&memcg->swap, nr_pages); 5018 } 5019 mod_memcg_state(memcg, MEMCG_SWAP, -nr_pages); 5020 mem_cgroup_id_put_many(memcg, nr_pages); 5021 } 5022 rcu_read_unlock(); 5023 } 5024 5025 long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg) 5026 { 5027 long nr_swap_pages = get_nr_swap_pages(); 5028 5029 if (mem_cgroup_disabled() || do_memsw_account()) 5030 return nr_swap_pages; 5031 for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg)) 5032 nr_swap_pages = min_t(long, nr_swap_pages, 5033 READ_ONCE(memcg->swap.max) - 5034 page_counter_read(&memcg->swap)); 5035 
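/* Report global free swap clamped by the tightest remaining swap allowance found among the ancestors walked above. */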
return nr_swap_pages; 5036 } 5037 5038 bool mem_cgroup_swap_full(struct folio *folio) 5039 { 5040 struct mem_cgroup *memcg; 5041 5042 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); 5043 5044 if (vm_swap_full()) 5045 return true; 5046 if (do_memsw_account()) 5047 return false; 5048 5049 memcg = folio_memcg(folio); 5050 if (!memcg) 5051 return false; 5052 5053 for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg)) { 5054 unsigned long usage = page_counter_read(&memcg->swap); 5055 5056 if (usage * 2 >= READ_ONCE(memcg->swap.high) || 5057 usage * 2 >= READ_ONCE(memcg->swap.max)) 5058 return true; 5059 } 5060 5061 return false; 5062 } 5063 5064 static int __init setup_swap_account(char *s) 5065 { 5066 bool res; 5067 5068 if (!kstrtobool(s, &res) && !res) 5069 pr_warn_once("The swapaccount=0 commandline option is deprecated " 5070 "in favor of configuring swap control via cgroupfs. " 5071 "Please report your usecase to linux-mm@kvack.org if you " 5072 "depend on this functionality.\n"); 5073 return 1; 5074 } 5075 __setup("swapaccount=", setup_swap_account); 5076 5077 static u64 swap_current_read(struct cgroup_subsys_state *css, 5078 struct cftype *cft) 5079 { 5080 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 5081 5082 return (u64)page_counter_read(&memcg->swap) * PAGE_SIZE; 5083 } 5084 5085 static int swap_peak_show(struct seq_file *sf, void *v) 5086 { 5087 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(sf)); 5088 5089 return peak_show(sf, v, &memcg->swap); 5090 } 5091 5092 static ssize_t swap_peak_write(struct kernfs_open_file *of, char *buf, 5093 size_t nbytes, loff_t off) 5094 { 5095 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 5096 5097 return peak_write(of, buf, nbytes, off, &memcg->swap, 5098 &memcg->swap_peaks); 5099 } 5100 5101 static int swap_high_show(struct seq_file *m, void *v) 5102 { 5103 return seq_puts_memcg_tunable(m, 5104 READ_ONCE(mem_cgroup_from_seq(m)->swap.high)); 5105 } 5106 5107 static ssize_t swap_high_write(struct kernfs_open_file *of, 5108 char *buf, size_t nbytes, loff_t off) 5109 { 5110 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 5111 unsigned long high; 5112 int err; 5113 5114 buf = strstrip(buf); 5115 err = page_counter_memparse(buf, "max", &high); 5116 if (err) 5117 return err; 5118 5119 page_counter_set_high(&memcg->swap, high); 5120 5121 return nbytes; 5122 } 5123 5124 static int swap_max_show(struct seq_file *m, void *v) 5125 { 5126 return seq_puts_memcg_tunable(m, 5127 READ_ONCE(mem_cgroup_from_seq(m)->swap.max)); 5128 } 5129 5130 static ssize_t swap_max_write(struct kernfs_open_file *of, 5131 char *buf, size_t nbytes, loff_t off) 5132 { 5133 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 5134 unsigned long max; 5135 int err; 5136 5137 buf = strstrip(buf); 5138 err = page_counter_memparse(buf, "max", &max); 5139 if (err) 5140 return err; 5141 5142 xchg(&memcg->swap.max, max); 5143 5144 return nbytes; 5145 } 5146 5147 static int swap_events_show(struct seq_file *m, void *v) 5148 { 5149 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 5150 5151 seq_printf(m, "high %lu\n", 5152 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_HIGH])); 5153 seq_printf(m, "max %lu\n", 5154 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_MAX])); 5155 seq_printf(m, "fail %lu\n", 5156 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_FAIL])); 5157 5158 return 0; 5159 } 5160 5161 static struct cftype swap_files[] = { 5162 { 5163 .name = "swap.current", 5164 .flags = CFTYPE_NOT_ON_ROOT, 5165 .read_u64 = 
swap_current_read, 5166 }, 5167 { 5168 .name = "swap.high", 5169 .flags = CFTYPE_NOT_ON_ROOT, 5170 .seq_show = swap_high_show, 5171 .write = swap_high_write, 5172 }, 5173 { 5174 .name = "swap.max", 5175 .flags = CFTYPE_NOT_ON_ROOT, 5176 .seq_show = swap_max_show, 5177 .write = swap_max_write, 5178 }, 5179 { 5180 .name = "swap.peak", 5181 .flags = CFTYPE_NOT_ON_ROOT, 5182 .open = peak_open, 5183 .release = peak_release, 5184 .seq_show = swap_peak_show, 5185 .write = swap_peak_write, 5186 }, 5187 { 5188 .name = "swap.events", 5189 .flags = CFTYPE_NOT_ON_ROOT, 5190 .file_offset = offsetof(struct mem_cgroup, swap_events_file), 5191 .seq_show = swap_events_show, 5192 }, 5193 { } /* terminate */ 5194 }; 5195 5196 #ifdef CONFIG_ZSWAP 5197 /** 5198 * obj_cgroup_may_zswap - check if this cgroup can zswap 5199 * @objcg: the object cgroup 5200 * 5201 * Check if the hierarchical zswap limit has been reached. 5202 * 5203 * This doesn't check for specific headroom, and it is not atomic 5204 * either. But with zswap, the size of the allocation is only known 5205 * once compression has occurred, and this optimistic pre-check avoids 5206 * spending cycles on compression when there is already no room left 5207 * or zswap is disabled altogether somewhere in the hierarchy. 5208 */ 5209 bool obj_cgroup_may_zswap(struct obj_cgroup *objcg) 5210 { 5211 struct mem_cgroup *memcg, *original_memcg; 5212 bool ret = true; 5213 5214 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) 5215 return true; 5216 5217 original_memcg = get_mem_cgroup_from_objcg(objcg); 5218 for (memcg = original_memcg; !mem_cgroup_is_root(memcg); 5219 memcg = parent_mem_cgroup(memcg)) { 5220 unsigned long max = READ_ONCE(memcg->zswap_max); 5221 unsigned long pages; 5222 5223 if (max == PAGE_COUNTER_MAX) 5224 continue; 5225 if (max == 0) { 5226 ret = false; 5227 break; 5228 } 5229 5230 /* Force flush to get accurate stats for charging */ 5231 __mem_cgroup_flush_stats(memcg, true); 5232 pages = memcg_page_state(memcg, MEMCG_ZSWAP_B) / PAGE_SIZE; 5233 if (pages < max) 5234 continue; 5235 ret = false; 5236 break; 5237 } 5238 mem_cgroup_put(original_memcg); 5239 return ret; 5240 } 5241 5242 /** 5243 * obj_cgroup_charge_zswap - charge compression backend memory 5244 * @objcg: the object cgroup 5245 * @size: size of compressed object 5246 * 5247 * This forces the charge after obj_cgroup_may_zswap() allowed 5248 * compression and storage in zswap for this cgroup to go ahead. 5249 */ 5250 void obj_cgroup_charge_zswap(struct obj_cgroup *objcg, size_t size) 5251 { 5252 struct mem_cgroup *memcg; 5253 5254 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) 5255 return; 5256 5257 VM_WARN_ON_ONCE(!(current->flags & PF_MEMALLOC)); 5258 5259 /* PF_MEMALLOC context, charging must succeed */ 5260 if (obj_cgroup_charge(objcg, GFP_KERNEL, size)) 5261 VM_WARN_ON_ONCE(1); 5262 5263 rcu_read_lock(); 5264 memcg = obj_cgroup_memcg(objcg); 5265 mod_memcg_state(memcg, MEMCG_ZSWAP_B, size); 5266 mod_memcg_state(memcg, MEMCG_ZSWAPPED, 1); 5267 rcu_read_unlock(); 5268 } 5269 5270 /** 5271 * obj_cgroup_uncharge_zswap - uncharge compression backend memory 5272 * @objcg: the object cgroup 5273 * @size: size of compressed object 5274 * 5275 * Uncharges zswap memory on page in.
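* This is the counterpart of obj_cgroup_charge_zswap(): @size bytes are returned to the objcg and the MEMCG_ZSWAP_B and MEMCG_ZSWAPPED statistics are decremented accordingly.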
5276 */ 5277 void obj_cgroup_uncharge_zswap(struct obj_cgroup *objcg, size_t size) 5278 { 5279 struct mem_cgroup *memcg; 5280 5281 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) 5282 return; 5283 5284 obj_cgroup_uncharge(objcg, size); 5285 5286 rcu_read_lock(); 5287 memcg = obj_cgroup_memcg(objcg); 5288 mod_memcg_state(memcg, MEMCG_ZSWAP_B, -size); 5289 mod_memcg_state(memcg, MEMCG_ZSWAPPED, -1); 5290 rcu_read_unlock(); 5291 } 5292 5293 bool mem_cgroup_zswap_writeback_enabled(struct mem_cgroup *memcg) 5294 { 5295 /* if zswap is disabled, do not block pages going to the swapping device */ 5296 if (!zswap_is_enabled()) 5297 return true; 5298 5299 for (; memcg; memcg = parent_mem_cgroup(memcg)) 5300 if (!READ_ONCE(memcg->zswap_writeback)) 5301 return false; 5302 5303 return true; 5304 } 5305 5306 static u64 zswap_current_read(struct cgroup_subsys_state *css, 5307 struct cftype *cft) 5308 { 5309 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 5310 5311 mem_cgroup_flush_stats(memcg); 5312 return memcg_page_state(memcg, MEMCG_ZSWAP_B); 5313 } 5314 5315 static int zswap_max_show(struct seq_file *m, void *v) 5316 { 5317 return seq_puts_memcg_tunable(m, 5318 READ_ONCE(mem_cgroup_from_seq(m)->zswap_max)); 5319 } 5320 5321 static ssize_t zswap_max_write(struct kernfs_open_file *of, 5322 char *buf, size_t nbytes, loff_t off) 5323 { 5324 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 5325 unsigned long max; 5326 int err; 5327 5328 buf = strstrip(buf); 5329 err = page_counter_memparse(buf, "max", &max); 5330 if (err) 5331 return err; 5332 5333 xchg(&memcg->zswap_max, max); 5334 5335 return nbytes; 5336 } 5337 5338 static int zswap_writeback_show(struct seq_file *m, void *v) 5339 { 5340 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 5341 5342 seq_printf(m, "%d\n", READ_ONCE(memcg->zswap_writeback)); 5343 return 0; 5344 } 5345 5346 static ssize_t zswap_writeback_write(struct kernfs_open_file *of, 5347 char *buf, size_t nbytes, loff_t off) 5348 { 5349 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 5350 int zswap_writeback; 5351 ssize_t parse_ret = kstrtoint(strstrip(buf), 0, &zswap_writeback); 5352 5353 if (parse_ret) 5354 return parse_ret; 5355 5356 if (zswap_writeback != 0 && zswap_writeback != 1) 5357 return -EINVAL; 5358 5359 WRITE_ONCE(memcg->zswap_writeback, zswap_writeback); 5360 return nbytes; 5361 } 5362 5363 static struct cftype zswap_files[] = { 5364 { 5365 .name = "zswap.current", 5366 .flags = CFTYPE_NOT_ON_ROOT, 5367 .read_u64 = zswap_current_read, 5368 }, 5369 { 5370 .name = "zswap.max", 5371 .flags = CFTYPE_NOT_ON_ROOT, 5372 .seq_show = zswap_max_show, 5373 .write = zswap_max_write, 5374 }, 5375 { 5376 .name = "zswap.writeback", 5377 .seq_show = zswap_writeback_show, 5378 .write = zswap_writeback_write, 5379 }, 5380 { } /* terminate */ 5381 }; 5382 #endif /* CONFIG_ZSWAP */ 5383 5384 static int __init mem_cgroup_swap_init(void) 5385 { 5386 if (mem_cgroup_disabled()) 5387 return 0; 5388 5389 WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys, swap_files)); 5390 #ifdef CONFIG_MEMCG_V1 5391 WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys, memsw_files)); 5392 #endif 5393 #ifdef CONFIG_ZSWAP 5394 WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys, zswap_files)); 5395 #endif 5396 return 0; 5397 } 5398 subsys_initcall(mem_cgroup_swap_init); 5399 5400 #endif /* CONFIG_SWAP */ 5401
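/*
* Illustrative usage of the cgroup v2 interface files implemented in this file
* (paths and values are examples only, assuming cgroup2 is mounted at
* /sys/fs/cgroup and a child cgroup "foo" exists):
*
*   cat /sys/fs/cgroup/foo/memory.current
*   echo 512M > /sys/fs/cgroup/foo/memory.high
*   echo "256M swappiness=10" > /sys/fs/cgroup/foo/memory.reclaim
*   echo 1G > /sys/fs/cgroup/foo/memory.swap.max
*   echo 0 > /sys/fs/cgroup/foo/memory.zswap.writeback
*/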