// SPDX-License-Identifier: GPL-2.0-or-later
/* memcontrol.c - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * Memory thresholds
 * Copyright (C) 2009 Nokia Corporation
 * Author: Kirill A. Shutemov
 *
 * Kernel Memory Controller
 * Copyright (C) 2012 Parallels Inc. and Google Inc.
 * Authors: Glauber Costa and Suleiman Souhlal
 *
 * Native page reclaim
 * Charge lifetime sanitation
 * Lockless page tracking & accounting
 * Unified hierarchy configuration model
 * Copyright (C) 2015 Red Hat, Inc., Johannes Weiner
 *
 * Per memcg lru locking
 * Copyright (C) 2020 Alibaba, Inc, Alex Shi
 */

#include <linux/cgroup-defs.h>
#include <linux/page_counter.h>
#include <linux/memcontrol.h>
#include <linux/cgroup.h>
#include <linux/cpuset.h>
#include <linux/sched/mm.h>
#include <linux/shmem_fs.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/folio_batch.h>
#include <linux/vm_event_item.h>
#include <linux/smp.h>
#include <linux/page-flags.h>
#include <linux/backing-dev.h>
#include <linux/bit_spinlock.h>
#include <linux/rcupdate.h>
#include <linux/limits.h>
#include <linux/export.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/swapops.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/vmpressure.h>
#include <linux/memremap.h>
#include <linux/mm_inline.h>
#include <linux/swap_cgroup.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/lockdep.h>
#include <linux/resume_user_mode.h>
#include <linux/psi.h>
#include <linux/seq_buf.h>
#include <linux/sched/isolation.h>
#include <linux/kmemleak.h>
#include "internal.h"
#include <net/sock.h>
#include <net/ip.h>
#include "slab.h"
#include "memcontrol-v1.h"

#include <linux/uaccess.h>

#define CREATE_TRACE_POINTS
#include <trace/events/memcg.h>
#undef CREATE_TRACE_POINTS

#include <trace/events/vmscan.h>

struct cgroup_subsys memory_cgrp_subsys __read_mostly;
EXPORT_SYMBOL(memory_cgrp_subsys);

struct mem_cgroup *root_mem_cgroup __read_mostly;
EXPORT_SYMBOL(root_mem_cgroup);

/* Active memory cgroup to use from an interrupt context */
DEFINE_PER_CPU(struct mem_cgroup *, int_active_memcg);
EXPORT_PER_CPU_SYMBOL_GPL(int_active_memcg);

/* Socket memory accounting disabled? */
static bool cgroup_memory_nosocket __ro_after_init;

/* Kernel memory accounting disabled? */
static bool cgroup_memory_nokmem __ro_after_init;

/* BPF memory accounting disabled? */
static bool cgroup_memory_nobpf __ro_after_init;

static struct workqueue_struct *memcg_wq __ro_after_init;

static struct kmem_cache *memcg_cachep;
static struct kmem_cache *memcg_pn_cachep;

#ifdef CONFIG_CGROUP_WRITEBACK
static DECLARE_WAIT_QUEUE_HEAD(memcg_cgwb_frn_waitq);
#endif

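/* Is the current task an OOM victim, fatally signalled, or already exiting? */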
static inline bool task_is_dying(void)
{
	return tsk_is_oom_victim(current) || fatal_signal_pending(current) ||
		(current->flags & PF_EXITING);
}

/* Some nice accessors for the vmpressure. */
struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
{
	if (!memcg)
		memcg = root_mem_cgroup;
	return &memcg->vmpressure;
}

struct mem_cgroup *vmpressure_to_memcg(struct vmpressure *vmpr)
{
	return container_of(vmpr, struct mem_cgroup, vmpressure);
}

#define SEQ_BUF_SIZE SZ_4K
#define CURRENT_OBJCG_UPDATE_BIT 0
#define CURRENT_OBJCG_UPDATE_FLAG (1UL << CURRENT_OBJCG_UPDATE_BIT)

static DEFINE_SPINLOCK(objcg_lock);

bool mem_cgroup_kmem_disabled(void)
{
	return cgroup_memory_nokmem;
}

static void memcg_uncharge(struct mem_cgroup *memcg, unsigned int nr_pages);

static void obj_cgroup_release(struct percpu_ref *ref)
{
	struct obj_cgroup *objcg = container_of(ref, struct obj_cgroup, refcnt);
	unsigned int nr_bytes;
	unsigned int nr_pages;
	unsigned long flags;

	/*
	 * At this point all allocated objects are freed, and
	 * objcg->nr_charged_bytes can't have an arbitrary byte value.
	 * However, it can be PAGE_SIZE or (x * PAGE_SIZE).
	 *
	 * The following sequence can lead to it:
	 * 1) CPU0: objcg == stock->cached_objcg
	 * 2) CPU1: we do a small allocation (e.g. 92 bytes),
	 *          PAGE_SIZE bytes are charged
	 * 3) CPU1: a process from another memcg is allocating something,
	 *          the stock is flushed,
	 *          objcg->nr_charged_bytes = PAGE_SIZE - 92
	 * 4) CPU0: we release this object,
	 *          92 bytes are added to stock->nr_bytes
	 * 5) CPU0: stock is flushed,
	 *          92 bytes are added to objcg->nr_charged_bytes
	 *
	 * As a result, nr_charged_bytes == PAGE_SIZE.
	 * This page will be uncharged in obj_cgroup_release().
	 */
	nr_bytes = atomic_read(&objcg->nr_charged_bytes);
	WARN_ON_ONCE(nr_bytes & (PAGE_SIZE - 1));
	nr_pages = nr_bytes >> PAGE_SHIFT;

	if (nr_pages) {
		struct mem_cgroup *memcg;

		memcg = get_mem_cgroup_from_objcg(objcg);
		mod_memcg_state(memcg, MEMCG_KMEM, -nr_pages);
		memcg1_account_kmem(memcg, -nr_pages);
		if (!mem_cgroup_is_root(memcg))
			memcg_uncharge(memcg, nr_pages);
		mem_cgroup_put(memcg);
	}

	spin_lock_irqsave(&objcg_lock, flags);
	list_del(&objcg->list);
	spin_unlock_irqrestore(&objcg_lock, flags);

	percpu_ref_exit(ref);
	kfree_rcu(objcg, rcu);
}

static struct obj_cgroup *obj_cgroup_alloc(void)
{
	struct obj_cgroup *objcg;
	int ret;

	objcg = kzalloc_obj(struct obj_cgroup);
	if (!objcg)
		return NULL;

	ret = percpu_ref_init(&objcg->refcnt, obj_cgroup_release, 0,
			      GFP_KERNEL);
	if (ret) {
		kfree(objcg);
		return NULL;
	}
	INIT_LIST_HEAD(&objcg->list);
	return objcg;
}

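/*
 * Reparent @memcg's obj_cgroups to @parent (typically done when @memcg is
 * offlined): retarget the still-active objcg and all previously reparented
 * ones at @parent, splice them onto the parent's list, and kill the percpu
 * reference of the active objcg.
 */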
static void memcg_reparent_objcgs(struct mem_cgroup *memcg,
				  struct mem_cgroup *parent)
{
	struct obj_cgroup *objcg, *iter;

	objcg = rcu_replace_pointer(memcg->objcg, NULL, true);

	spin_lock_irq(&objcg_lock);

	/* 1) Ready to reparent active objcg. */
	list_add(&objcg->list, &memcg->objcg_list);
	/* 2) Reparent active objcg and already reparented objcgs to parent. */
	list_for_each_entry(iter, &memcg->objcg_list, list)
		WRITE_ONCE(iter->memcg, parent);
	/* 3) Move already reparented objcgs to the parent's list */
	list_splice(&memcg->objcg_list, &parent->objcg_list);

	spin_unlock_irq(&objcg_lock);

	percpu_ref_kill(&objcg->refcnt);
}

/*
 * A lot of the calls to the cache allocation functions are expected to be
 * inlined by the compiler. Since the calls to memcg_slab_post_alloc_hook()
 * are conditional to this static branch, we'll have to allow modules that
 * do kmem_cache_alloc and the like to see this symbol as well.
 */
DEFINE_STATIC_KEY_FALSE(memcg_kmem_online_key);
EXPORT_SYMBOL(memcg_kmem_online_key);

DEFINE_STATIC_KEY_FALSE(memcg_bpf_enabled_key);
EXPORT_SYMBOL(memcg_bpf_enabled_key);

/**
 * mem_cgroup_css_from_folio - css of the memcg associated with a folio
 * @folio: folio of interest
 *
 * If memcg is bound to the default hierarchy, css of the memcg associated
 * with @folio is returned. The returned css remains associated with @folio
 * until it is released.
 *
 * If memcg is bound to a traditional hierarchy, the css of root_mem_cgroup
 * is returned.
 */
struct cgroup_subsys_state *mem_cgroup_css_from_folio(struct folio *folio)
{
	struct mem_cgroup *memcg = folio_memcg(folio);

	if (!memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
		memcg = root_mem_cgroup;

	return &memcg->css;
}

/**
 * page_cgroup_ino - return inode number of the memcg a page is charged to
 * @page: the page
 *
 * Look up the closest online ancestor of the memory cgroup @page is charged to
 * and return its inode number or 0 if @page is not charged to any cgroup. It
 * is safe to call this function without holding a reference to @page.
 *
 * Note, this function is inherently racy, because there is nothing to prevent
 * the cgroup inode from getting torn down and potentially reallocated a moment
 * after page_cgroup_ino() returns, so it only should be used by callers that
 * do not care (such as procfs interfaces).
 */
ino_t page_cgroup_ino(struct page *page)
{
	struct mem_cgroup *memcg;
	unsigned long ino = 0;

	rcu_read_lock();
	/* page_folio() is racy here, but the entire function is racy anyway */
	memcg = folio_memcg_check(page_folio(page));

	while (memcg && !css_is_online(&memcg->css))
		memcg = parent_mem_cgroup(memcg);
	if (memcg)
		ino = cgroup_ino(memcg->css.cgroup);
	rcu_read_unlock();
	return ino;
}
EXPORT_SYMBOL_GPL(page_cgroup_ino);

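/*
 * Only a subset of the global vmstat/vm_event items is tracked per memcg.
 * The tables below list that subset; init_memcg_stats() and
 * init_memcg_events() compress the sparse item numbers into dense array
 * indices, and items missing from the tables map to U8_MAX (rejected by
 * BAD_STAT_IDX()).
 */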
/* Subset of node_stat_item for memcg stats */
static const unsigned int memcg_node_stat_items[] = {
	NR_INACTIVE_ANON,
	NR_ACTIVE_ANON,
	NR_INACTIVE_FILE,
	NR_ACTIVE_FILE,
	NR_UNEVICTABLE,
	NR_SLAB_RECLAIMABLE_B,
	NR_SLAB_UNRECLAIMABLE_B,
	WORKINGSET_REFAULT_ANON,
	WORKINGSET_REFAULT_FILE,
	WORKINGSET_ACTIVATE_ANON,
	WORKINGSET_ACTIVATE_FILE,
	WORKINGSET_RESTORE_ANON,
	WORKINGSET_RESTORE_FILE,
	WORKINGSET_NODERECLAIM,
	NR_ANON_MAPPED,
	NR_FILE_MAPPED,
	NR_FILE_PAGES,
	NR_FILE_DIRTY,
	NR_WRITEBACK,
	NR_SHMEM,
	NR_SHMEM_THPS,
	NR_FILE_THPS,
	NR_ANON_THPS,
	NR_VMALLOC,
	NR_KERNEL_STACK_KB,
	NR_PAGETABLE,
	NR_SECONDARY_PAGETABLE,
#ifdef CONFIG_SWAP
	NR_SWAPCACHE,
#endif
#ifdef CONFIG_NUMA_BALANCING
	PGPROMOTE_SUCCESS,
#endif
	PGDEMOTE_KSWAPD,
	PGDEMOTE_DIRECT,
	PGDEMOTE_KHUGEPAGED,
	PGDEMOTE_PROACTIVE,
	PGSTEAL_KSWAPD,
	PGSTEAL_DIRECT,
	PGSTEAL_KHUGEPAGED,
	PGSTEAL_PROACTIVE,
	PGSTEAL_ANON,
	PGSTEAL_FILE,
	PGSCAN_KSWAPD,
	PGSCAN_DIRECT,
	PGSCAN_KHUGEPAGED,
	PGSCAN_PROACTIVE,
	PGSCAN_ANON,
	PGSCAN_FILE,
	PGREFILL,
#ifdef CONFIG_HUGETLB_PAGE
	NR_HUGETLB,
#endif
};

static const unsigned int memcg_stat_items[] = {
	MEMCG_SWAP,
	MEMCG_SOCK,
	MEMCG_PERCPU_B,
	MEMCG_KMEM,
	MEMCG_ZSWAP_B,
	MEMCG_ZSWAPPED,
	MEMCG_ZSWAP_INCOMP,
};

#define NR_MEMCG_NODE_STAT_ITEMS ARRAY_SIZE(memcg_node_stat_items)
#define MEMCG_VMSTAT_SIZE (NR_MEMCG_NODE_STAT_ITEMS + \
			   ARRAY_SIZE(memcg_stat_items))
#define BAD_STAT_IDX(index) ((u32)(index) >= U8_MAX)
static u8 mem_cgroup_stats_index[MEMCG_NR_STAT] __read_mostly;

static void init_memcg_stats(void)
{
	u8 i, j = 0;

	BUILD_BUG_ON(MEMCG_NR_STAT >= U8_MAX);

	memset(mem_cgroup_stats_index, U8_MAX, sizeof(mem_cgroup_stats_index));

	for (i = 0; i < NR_MEMCG_NODE_STAT_ITEMS; ++i, ++j)
		mem_cgroup_stats_index[memcg_node_stat_items[i]] = j;

	for (i = 0; i < ARRAY_SIZE(memcg_stat_items); ++i, ++j)
		mem_cgroup_stats_index[memcg_stat_items[i]] = j;
}

static inline int memcg_stats_index(int idx)
{
	return mem_cgroup_stats_index[idx];
}

struct lruvec_stats_percpu {
	/* Local (CPU and cgroup) state */
	long state[NR_MEMCG_NODE_STAT_ITEMS];

	/* Delta calculation for lockless upward propagation */
	long state_prev[NR_MEMCG_NODE_STAT_ITEMS];
};

struct lruvec_stats {
	/* Aggregated (CPU and subtree) state */
	long state[NR_MEMCG_NODE_STAT_ITEMS];

	/* Non-hierarchical (CPU aggregated) state */
	long state_local[NR_MEMCG_NODE_STAT_ITEMS];

	/* Pending child counts during tree propagation */
	long state_pending[NR_MEMCG_NODE_STAT_ITEMS];
};

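/*
 * lruvec_page_state() returns the hierarchical (subtree-aggregated) value of
 * a per-node stat item for this lruvec; lruvec_page_state_local() below
 * returns the non-hierarchical value. Both fall back to the plain node-wide
 * counter when memcg is disabled.
 */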
unsigned long lruvec_page_state(struct lruvec *lruvec, enum node_stat_item idx)
{
	struct mem_cgroup_per_node *pn;
	long x;
	int i;

	if (mem_cgroup_disabled())
		return node_page_state(lruvec_pgdat(lruvec), idx);

	i = memcg_stats_index(idx);
	if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx))
		return 0;

	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	x = READ_ONCE(pn->lruvec_stats->state[i]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

unsigned long lruvec_page_state_local(struct lruvec *lruvec,
				      enum node_stat_item idx)
{
	struct mem_cgroup_per_node *pn;
	long x;
	int i;

	if (mem_cgroup_disabled())
		return node_page_state(lruvec_pgdat(lruvec), idx);

	i = memcg_stats_index(idx);
	if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx))
		return 0;

	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	x = READ_ONCE(pn->lruvec_stats->state_local[i]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

/* Subset of vm_event_item to report for memcg event stats */
static const unsigned int memcg_vm_event_stat[] = {
#ifdef CONFIG_MEMCG_V1
	PGPGIN,
	PGPGOUT,
#endif
	PSWPIN,
	PSWPOUT,
	PGFAULT,
	PGMAJFAULT,
	PGACTIVATE,
	PGDEACTIVATE,
	PGLAZYFREE,
	PGLAZYFREED,
#ifdef CONFIG_SWAP
	SWPIN_ZERO,
	SWPOUT_ZERO,
#endif
#ifdef CONFIG_ZSWAP
	ZSWPIN,
	ZSWPOUT,
	ZSWPWB,
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	THP_FAULT_ALLOC,
	THP_COLLAPSE_ALLOC,
	THP_SWPOUT,
	THP_SWPOUT_FALLBACK,
#endif
#ifdef CONFIG_NUMA_BALANCING
	NUMA_PAGE_MIGRATE,
	NUMA_PTE_UPDATES,
	NUMA_HINT_FAULTS,
#endif
};

#define NR_MEMCG_EVENTS ARRAY_SIZE(memcg_vm_event_stat)
static u8 mem_cgroup_events_index[NR_VM_EVENT_ITEMS] __read_mostly;

static void init_memcg_events(void)
{
	u8 i;

	BUILD_BUG_ON(NR_VM_EVENT_ITEMS >= U8_MAX);

	memset(mem_cgroup_events_index, U8_MAX,
	       sizeof(mem_cgroup_events_index));

	for (i = 0; i < NR_MEMCG_EVENTS; ++i)
		mem_cgroup_events_index[memcg_vm_event_stat[i]] = i;
}

static inline int memcg_events_index(enum vm_event_item idx)
{
	return mem_cgroup_events_index[idx];
}

struct memcg_vmstats_percpu {
	/* Stats updates since the last flush */
	unsigned int stats_updates;

	/* Cached pointers for fast iteration in memcg_rstat_updated() */
	struct memcg_vmstats_percpu __percpu *parent_pcpu;
	struct memcg_vmstats *vmstats;

	/* The above should fit a single cacheline for memcg_rstat_updated() */

	/* Local (CPU and cgroup) page state & events */
	long state[MEMCG_VMSTAT_SIZE];
	unsigned long events[NR_MEMCG_EVENTS];

	/* Delta calculation for lockless upward propagation */
	long state_prev[MEMCG_VMSTAT_SIZE];
	unsigned long events_prev[NR_MEMCG_EVENTS];
} ____cacheline_aligned;

struct memcg_vmstats {
	/* Aggregated (CPU and subtree) page state & events */
	long state[MEMCG_VMSTAT_SIZE];
	unsigned long events[NR_MEMCG_EVENTS];

	/* Non-hierarchical (CPU aggregated) page state & events */
	long state_local[MEMCG_VMSTAT_SIZE];
	unsigned long events_local[NR_MEMCG_EVENTS];

	/* Pending child counts during tree propagation */
	long state_pending[MEMCG_VMSTAT_SIZE];
	unsigned long events_pending[NR_MEMCG_EVENTS];

	/* Stats updates since the last flush */
	atomic_t stats_updates;
};

/*
 * memcg and lruvec stats flushing
 *
 * Many codepaths leading to stats update or read are performance sensitive
 * and adding stats flushing in such codepaths is not desirable. So, to
 * optimize the flushing the kernel does:
 *
 * 1) Periodically and asynchronously flush the stats every 2 seconds to not
 *    let the rstat update tree grow unbounded.
 *
 * 2) Flush the stats synchronously on the reader side only when there are
 *    more than (MEMCG_CHARGE_BATCH * nr_cpus) update events. This can let
 *    the stats be out of sync by at most (MEMCG_CHARGE_BATCH * nr_cpus),
 *    but only for 2 seconds due to (1).
 */
static void flush_memcg_stats_dwork(struct work_struct *w);
static DECLARE_DEFERRABLE_WORK(stats_flush_dwork, flush_memcg_stats_dwork);
static u64 flush_last_time;

#define FLUSH_TIME (2UL*HZ)

static bool memcg_vmstats_needs_flush(struct memcg_vmstats *vmstats)
{
	return atomic_read(&vmstats->stats_updates) >
		MEMCG_CHARGE_BATCH * num_online_cpus();
}

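/*
 * Propagate a stat/event update of magnitude @val up the memcg hierarchy on
 * @cpu: each level accumulates the update in its per-cpu stats_updates and,
 * once MEMCG_CHARGE_BATCH is reached, folds it into the atomic counter that
 * memcg_vmstats_needs_flush() checks. The walk stops early once a level is
 * already flushable, since all of its ancestors are then flushable as well.
 */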
static inline void memcg_rstat_updated(struct mem_cgroup *memcg, int val,
				       int cpu)
{
	struct memcg_vmstats_percpu __percpu *statc_pcpu;
	struct memcg_vmstats_percpu *statc;
	unsigned int stats_updates;

	if (!val)
		return;

	css_rstat_updated(&memcg->css, cpu);
	statc_pcpu = memcg->vmstats_percpu;
	for (; statc_pcpu; statc_pcpu = statc->parent_pcpu) {
		statc = this_cpu_ptr(statc_pcpu);
		/*
		 * If @memcg is already flushable then all its ancestors are
		 * flushable as well and also there is no need to increase
		 * stats_updates.
		 */
		if (memcg_vmstats_needs_flush(statc->vmstats))
			break;

		stats_updates = this_cpu_add_return(statc_pcpu->stats_updates,
						    abs(val));
		if (stats_updates < MEMCG_CHARGE_BATCH)
			continue;

		stats_updates = this_cpu_xchg(statc_pcpu->stats_updates, 0);
		atomic_add(stats_updates, &statc->vmstats->stats_updates);
	}
}

static void __mem_cgroup_flush_stats(struct mem_cgroup *memcg, bool force)
{
	bool needs_flush = memcg_vmstats_needs_flush(memcg->vmstats);

	trace_memcg_flush_stats(memcg, atomic_read(&memcg->vmstats->stats_updates),
				force, needs_flush);

	if (!force && !needs_flush)
		return;

	if (mem_cgroup_is_root(memcg))
		WRITE_ONCE(flush_last_time, jiffies_64);

	css_rstat_flush(&memcg->css);
}

/*
 * mem_cgroup_flush_stats - flush the stats of a memory cgroup subtree
 * @memcg: root of the subtree to flush
 *
 * Flushing is serialized by the underlying global rstat lock. There is also a
 * minimum amount of work to be done even if there are no stat updates to
 * flush. Hence, we only flush the stats if the updates delta exceeds a
 * threshold. This avoids unnecessary work and contention on the underlying
 * lock.
 */
void mem_cgroup_flush_stats(struct mem_cgroup *memcg)
{
	if (mem_cgroup_disabled())
		return;

	if (!memcg)
		memcg = root_mem_cgroup;

	__mem_cgroup_flush_stats(memcg, false);
}

void mem_cgroup_flush_stats_ratelimited(struct mem_cgroup *memcg)
{
	/* Only flush if the periodic flusher is one full cycle late */
	if (time_after64(jiffies_64, READ_ONCE(flush_last_time) + 2*FLUSH_TIME))
		mem_cgroup_flush_stats(memcg);
}

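/* Periodic worker: force-flush the whole tree every FLUSH_TIME and rearm. */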
652 */ 653 __mem_cgroup_flush_stats(root_mem_cgroup, true); 654 queue_delayed_work(system_dfl_wq, &stats_flush_dwork, FLUSH_TIME); 655 } 656 657 unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx) 658 { 659 long x; 660 int i = memcg_stats_index(idx); 661 662 if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx)) 663 return 0; 664 665 x = READ_ONCE(memcg->vmstats->state[i]); 666 #ifdef CONFIG_SMP 667 if (x < 0) 668 x = 0; 669 #endif 670 return x; 671 } 672 673 bool memcg_stat_item_valid(int idx) 674 { 675 if ((u32)idx >= MEMCG_NR_STAT) 676 return false; 677 678 return !BAD_STAT_IDX(memcg_stats_index(idx)); 679 } 680 681 static int memcg_page_state_unit(int item); 682 683 /* 684 * Normalize the value passed into memcg_rstat_updated() to be in pages. Round 685 * up non-zero sub-page updates to 1 page as zero page updates are ignored. 686 */ 687 static int memcg_state_val_in_pages(int idx, int val) 688 { 689 int unit = memcg_page_state_unit(idx); 690 691 if (!val || unit == PAGE_SIZE) 692 return val; 693 else 694 return max(val * unit / PAGE_SIZE, 1UL); 695 } 696 697 /** 698 * mod_memcg_state - update cgroup memory statistics 699 * @memcg: the memory cgroup 700 * @idx: the stat item - can be enum memcg_stat_item or enum node_stat_item 701 * @val: delta to add to the counter, can be negative 702 */ 703 void mod_memcg_state(struct mem_cgroup *memcg, enum memcg_stat_item idx, 704 int val) 705 { 706 int i = memcg_stats_index(idx); 707 int cpu; 708 709 if (mem_cgroup_disabled()) 710 return; 711 712 if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx)) 713 return; 714 715 cpu = get_cpu(); 716 717 this_cpu_add(memcg->vmstats_percpu->state[i], val); 718 val = memcg_state_val_in_pages(idx, val); 719 memcg_rstat_updated(memcg, val, cpu); 720 trace_mod_memcg_state(memcg, idx, val); 721 722 put_cpu(); 723 } 724 725 #ifdef CONFIG_MEMCG_V1 726 /* idx can be of type enum memcg_stat_item or node_stat_item. */ 727 unsigned long memcg_page_state_local(struct mem_cgroup *memcg, int idx) 728 { 729 long x; 730 int i = memcg_stats_index(idx); 731 732 if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx)) 733 return 0; 734 735 x = READ_ONCE(memcg->vmstats->state_local[i]); 736 #ifdef CONFIG_SMP 737 if (x < 0) 738 x = 0; 739 #endif 740 return x; 741 } 742 #endif 743 744 static void mod_memcg_lruvec_state(struct lruvec *lruvec, 745 enum node_stat_item idx, 746 int val) 747 { 748 struct mem_cgroup_per_node *pn; 749 struct mem_cgroup *memcg; 750 int i = memcg_stats_index(idx); 751 int cpu; 752 753 if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx)) 754 return; 755 756 pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec); 757 memcg = pn->memcg; 758 759 cpu = get_cpu(); 760 761 /* Update memcg */ 762 this_cpu_add(memcg->vmstats_percpu->state[i], val); 763 764 /* Update lruvec */ 765 this_cpu_add(pn->lruvec_stats_percpu->state[i], val); 766 767 val = memcg_state_val_in_pages(idx, val); 768 memcg_rstat_updated(memcg, val, cpu); 769 trace_mod_memcg_lruvec_state(memcg, idx, val); 770 771 put_cpu(); 772 } 773 774 /** 775 * mod_lruvec_state - update lruvec memory statistics 776 * @lruvec: the lruvec 777 * @idx: the stat item 778 * @val: delta to add to the counter, can be negative 779 * 780 * The lruvec is the intersection of the NUMA node and a cgroup. This 781 * function updates the all three counters that are affected by a 782 * change of state at this level: per-node, per-cgroup, per-lruvec. 
783 */ 784 void mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx, 785 int val) 786 { 787 /* Update node */ 788 mod_node_page_state(lruvec_pgdat(lruvec), idx, val); 789 790 /* Update memcg and lruvec */ 791 if (!mem_cgroup_disabled()) 792 mod_memcg_lruvec_state(lruvec, idx, val); 793 } 794 795 void lruvec_stat_mod_folio(struct folio *folio, enum node_stat_item idx, 796 int val) 797 { 798 struct mem_cgroup *memcg; 799 pg_data_t *pgdat = folio_pgdat(folio); 800 struct lruvec *lruvec; 801 802 rcu_read_lock(); 803 memcg = folio_memcg(folio); 804 /* Untracked pages have no memcg, no lruvec. Update only the node */ 805 if (!memcg) { 806 rcu_read_unlock(); 807 mod_node_page_state(pgdat, idx, val); 808 return; 809 } 810 811 lruvec = mem_cgroup_lruvec(memcg, pgdat); 812 mod_lruvec_state(lruvec, idx, val); 813 rcu_read_unlock(); 814 } 815 EXPORT_SYMBOL(lruvec_stat_mod_folio); 816 817 void mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val) 818 { 819 pg_data_t *pgdat = page_pgdat(virt_to_page(p)); 820 struct mem_cgroup *memcg; 821 struct lruvec *lruvec; 822 823 rcu_read_lock(); 824 memcg = mem_cgroup_from_virt(p); 825 826 /* 827 * Untracked pages have no memcg, no lruvec. Update only the 828 * node. If we reparent the slab objects to the root memcg, 829 * when we free the slab object, we need to update the per-memcg 830 * vmstats to keep it correct for the root memcg. 831 */ 832 if (!memcg) { 833 mod_node_page_state(pgdat, idx, val); 834 } else { 835 lruvec = mem_cgroup_lruvec(memcg, pgdat); 836 mod_lruvec_state(lruvec, idx, val); 837 } 838 rcu_read_unlock(); 839 } 840 841 /** 842 * count_memcg_events - account VM events in a cgroup 843 * @memcg: the memory cgroup 844 * @idx: the event item 845 * @count: the number of events that occurred 846 */ 847 void count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx, 848 unsigned long count) 849 { 850 int i = memcg_events_index(idx); 851 int cpu; 852 853 if (mem_cgroup_disabled()) 854 return; 855 856 if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx)) 857 return; 858 859 cpu = get_cpu(); 860 861 this_cpu_add(memcg->vmstats_percpu->events[i], count); 862 memcg_rstat_updated(memcg, count, cpu); 863 trace_count_memcg_events(memcg, idx, count); 864 865 put_cpu(); 866 } 867 868 unsigned long memcg_events(struct mem_cgroup *memcg, int event) 869 { 870 int i = memcg_events_index(event); 871 872 if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, event)) 873 return 0; 874 875 return READ_ONCE(memcg->vmstats->events[i]); 876 } 877 878 bool memcg_vm_event_item_valid(enum vm_event_item idx) 879 { 880 if (idx >= NR_VM_EVENT_ITEMS) 881 return false; 882 883 return !BAD_STAT_IDX(memcg_events_index(idx)); 884 } 885 886 #ifdef CONFIG_MEMCG_V1 887 unsigned long memcg_events_local(struct mem_cgroup *memcg, int event) 888 { 889 int i = memcg_events_index(event); 890 891 if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, event)) 892 return 0; 893 894 return READ_ONCE(memcg->vmstats->events_local[i]); 895 } 896 #endif 897 898 struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p) 899 { 900 /* 901 * mm_update_next_owner() may clear mm->owner to NULL 902 * if it races with swapoff, page migration, etc. 903 * So this can be called with p == NULL. 
904 */ 905 if (unlikely(!p)) 906 return NULL; 907 908 return mem_cgroup_from_css(task_css(p, memory_cgrp_id)); 909 } 910 EXPORT_SYMBOL(mem_cgroup_from_task); 911 912 static __always_inline struct mem_cgroup *active_memcg(void) 913 { 914 if (!in_task()) 915 return this_cpu_read(int_active_memcg); 916 else 917 return current->active_memcg; 918 } 919 920 /** 921 * get_mem_cgroup_from_mm: Obtain a reference on given mm_struct's memcg. 922 * @mm: mm from which memcg should be extracted. It can be NULL. 923 * 924 * Obtain a reference on mm->memcg and returns it if successful. If mm 925 * is NULL, then the memcg is chosen as follows: 926 * 1) The active memcg, if set. 927 * 2) current->mm->memcg, if available 928 * 3) root memcg 929 * If mem_cgroup is disabled, NULL is returned. 930 */ 931 struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm) 932 { 933 struct mem_cgroup *memcg; 934 935 if (mem_cgroup_disabled()) 936 return NULL; 937 938 /* 939 * Page cache insertions can happen without an 940 * actual mm context, e.g. during disk probing 941 * on boot, loopback IO, acct() writes etc. 942 * 943 * No need to css_get on root memcg as the reference 944 * counting is disabled on the root level in the 945 * cgroup core. See CSS_NO_REF. 946 */ 947 if (unlikely(!mm)) { 948 memcg = active_memcg(); 949 if (unlikely(memcg)) { 950 /* remote memcg must hold a ref */ 951 css_get(&memcg->css); 952 return memcg; 953 } 954 mm = current->mm; 955 if (unlikely(!mm)) 956 return root_mem_cgroup; 957 } 958 959 rcu_read_lock(); 960 do { 961 memcg = mem_cgroup_from_task(rcu_dereference(mm->owner)); 962 if (unlikely(!memcg)) 963 memcg = root_mem_cgroup; 964 } while (!css_tryget(&memcg->css)); 965 rcu_read_unlock(); 966 return memcg; 967 } 968 EXPORT_SYMBOL(get_mem_cgroup_from_mm); 969 970 /** 971 * get_mem_cgroup_from_current - Obtain a reference on current task's memcg. 972 */ 973 struct mem_cgroup *get_mem_cgroup_from_current(void) 974 { 975 struct mem_cgroup *memcg; 976 977 if (mem_cgroup_disabled()) 978 return NULL; 979 980 again: 981 rcu_read_lock(); 982 memcg = mem_cgroup_from_task(current); 983 if (!css_tryget(&memcg->css)) { 984 rcu_read_unlock(); 985 goto again; 986 } 987 rcu_read_unlock(); 988 return memcg; 989 } 990 991 /** 992 * get_mem_cgroup_from_folio - Obtain a reference on a given folio's memcg. 993 * @folio: folio from which memcg should be extracted. 994 */ 995 struct mem_cgroup *get_mem_cgroup_from_folio(struct folio *folio) 996 { 997 struct mem_cgroup *memcg = folio_memcg(folio); 998 999 if (mem_cgroup_disabled()) 1000 return NULL; 1001 1002 rcu_read_lock(); 1003 if (!memcg || WARN_ON_ONCE(!css_tryget(&memcg->css))) 1004 memcg = root_mem_cgroup; 1005 rcu_read_unlock(); 1006 return memcg; 1007 } 1008 1009 /** 1010 * mem_cgroup_iter - iterate over memory cgroup hierarchy 1011 * @root: hierarchy root 1012 * @prev: previously returned memcg, NULL on first invocation 1013 * @reclaim: cookie for shared reclaim walks, NULL for full walks 1014 * 1015 * Returns references to children of the hierarchy below @root, or 1016 * @root itself, or %NULL after a full round-trip. 1017 * 1018 * Caller must pass the return value in @prev on subsequent 1019 * invocations for reference counting, or use mem_cgroup_iter_break() 1020 * to cancel a hierarchy walk before the round-trip is complete. 1021 * 1022 * Reclaimers can specify a node in @reclaim to divide up the memcgs 1023 * in the hierarchy among all concurrent reclaimers operating on the 1024 * same node. 
1025 */ 1026 struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root, 1027 struct mem_cgroup *prev, 1028 struct mem_cgroup_reclaim_cookie *reclaim) 1029 { 1030 struct mem_cgroup_reclaim_iter *iter; 1031 struct cgroup_subsys_state *css; 1032 struct mem_cgroup *pos; 1033 struct mem_cgroup *next; 1034 1035 if (mem_cgroup_disabled()) 1036 return NULL; 1037 1038 if (!root) 1039 root = root_mem_cgroup; 1040 1041 rcu_read_lock(); 1042 restart: 1043 next = NULL; 1044 1045 if (reclaim) { 1046 int gen; 1047 int nid = reclaim->pgdat->node_id; 1048 1049 iter = &root->nodeinfo[nid]->iter; 1050 gen = atomic_read(&iter->generation); 1051 1052 /* 1053 * On start, join the current reclaim iteration cycle. 1054 * Exit when a concurrent walker completes it. 1055 */ 1056 if (!prev) 1057 reclaim->generation = gen; 1058 else if (reclaim->generation != gen) 1059 goto out_unlock; 1060 1061 pos = READ_ONCE(iter->position); 1062 } else 1063 pos = prev; 1064 1065 css = pos ? &pos->css : NULL; 1066 1067 while ((css = css_next_descendant_pre(css, &root->css))) { 1068 /* 1069 * Verify the css and acquire a reference. The root 1070 * is provided by the caller, so we know it's alive 1071 * and kicking, and don't take an extra reference. 1072 */ 1073 if (css == &root->css || css_tryget(css)) 1074 break; 1075 } 1076 1077 next = mem_cgroup_from_css(css); 1078 1079 if (reclaim) { 1080 /* 1081 * The position could have already been updated by a competing 1082 * thread, so check that the value hasn't changed since we read 1083 * it to avoid reclaiming from the same cgroup twice. 1084 */ 1085 if (cmpxchg(&iter->position, pos, next) != pos) { 1086 if (css && css != &root->css) 1087 css_put(css); 1088 goto restart; 1089 } 1090 1091 if (!next) { 1092 atomic_inc(&iter->generation); 1093 1094 /* 1095 * Reclaimers share the hierarchy walk, and a 1096 * new one might jump in right at the end of 1097 * the hierarchy - make sure they see at least 1098 * one group and restart from the beginning. 1099 */ 1100 if (!prev) 1101 goto restart; 1102 } 1103 } 1104 1105 out_unlock: 1106 rcu_read_unlock(); 1107 if (prev && prev != root) 1108 css_put(&prev->css); 1109 1110 return next; 1111 } 1112 1113 /** 1114 * mem_cgroup_iter_break - abort a hierarchy walk prematurely 1115 * @root: hierarchy root 1116 * @prev: last visited hierarchy member as returned by mem_cgroup_iter() 1117 */ 1118 void mem_cgroup_iter_break(struct mem_cgroup *root, 1119 struct mem_cgroup *prev) 1120 { 1121 if (!root) 1122 root = root_mem_cgroup; 1123 if (prev && prev != root) 1124 css_put(&prev->css); 1125 } 1126 1127 static void __invalidate_reclaim_iterators(struct mem_cgroup *from, 1128 struct mem_cgroup *dead_memcg) 1129 { 1130 struct mem_cgroup_reclaim_iter *iter; 1131 struct mem_cgroup_per_node *mz; 1132 int nid; 1133 1134 for_each_node(nid) { 1135 mz = from->nodeinfo[nid]; 1136 iter = &mz->iter; 1137 cmpxchg(&iter->position, dead_memcg, NULL); 1138 } 1139 } 1140 1141 static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg) 1142 { 1143 struct mem_cgroup *memcg = dead_memcg; 1144 struct mem_cgroup *last; 1145 1146 do { 1147 __invalidate_reclaim_iterators(memcg, dead_memcg); 1148 last = memcg; 1149 } while ((memcg = parent_mem_cgroup(memcg))); 1150 1151 /* 1152 * When cgroup1 non-hierarchy mode is used, 1153 * parent_mem_cgroup() does not walk all the way up to the 1154 * cgroup root (root_mem_cgroup). So we have to handle 1155 * dead_memcg from cgroup root separately. 
1156 */ 1157 if (!mem_cgroup_is_root(last)) 1158 __invalidate_reclaim_iterators(root_mem_cgroup, 1159 dead_memcg); 1160 } 1161 1162 /** 1163 * mem_cgroup_scan_tasks - iterate over tasks of a memory cgroup hierarchy 1164 * @memcg: hierarchy root 1165 * @fn: function to call for each task 1166 * @arg: argument passed to @fn 1167 * 1168 * This function iterates over tasks attached to @memcg or to any of its 1169 * descendants and calls @fn for each task. If @fn returns a non-zero 1170 * value, the function breaks the iteration loop. Otherwise, it will iterate 1171 * over all tasks and return 0. 1172 * 1173 * This function must not be called for the root memory cgroup. 1174 */ 1175 void mem_cgroup_scan_tasks(struct mem_cgroup *memcg, 1176 int (*fn)(struct task_struct *, void *), void *arg) 1177 { 1178 struct mem_cgroup *iter; 1179 int ret = 0; 1180 1181 BUG_ON(mem_cgroup_is_root(memcg)); 1182 1183 for_each_mem_cgroup_tree(iter, memcg) { 1184 struct css_task_iter it; 1185 struct task_struct *task; 1186 1187 css_task_iter_start(&iter->css, CSS_TASK_ITER_PROCS, &it); 1188 while (!ret && (task = css_task_iter_next(&it))) { 1189 ret = fn(task, arg); 1190 /* Avoid potential softlockup warning */ 1191 cond_resched(); 1192 } 1193 css_task_iter_end(&it); 1194 if (ret) { 1195 mem_cgroup_iter_break(memcg, iter); 1196 break; 1197 } 1198 } 1199 } 1200 1201 #ifdef CONFIG_DEBUG_VM 1202 void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio) 1203 { 1204 struct mem_cgroup *memcg; 1205 1206 if (mem_cgroup_disabled()) 1207 return; 1208 1209 memcg = folio_memcg(folio); 1210 1211 if (!memcg) 1212 VM_BUG_ON_FOLIO(!mem_cgroup_is_root(lruvec_memcg(lruvec)), folio); 1213 else 1214 VM_BUG_ON_FOLIO(lruvec_memcg(lruvec) != memcg, folio); 1215 } 1216 #endif 1217 1218 /** 1219 * folio_lruvec_lock - Lock the lruvec for a folio. 1220 * @folio: Pointer to the folio. 1221 * 1222 * These functions are safe to use under any of the following conditions: 1223 * - folio locked 1224 * - folio_test_lru false 1225 * - folio frozen (refcount of 0) 1226 * 1227 * Return: The lruvec this folio is on with its lock held. 1228 */ 1229 struct lruvec *folio_lruvec_lock(struct folio *folio) 1230 { 1231 struct lruvec *lruvec = folio_lruvec(folio); 1232 1233 spin_lock(&lruvec->lru_lock); 1234 lruvec_memcg_debug(lruvec, folio); 1235 1236 return lruvec; 1237 } 1238 1239 /** 1240 * folio_lruvec_lock_irq - Lock the lruvec for a folio. 1241 * @folio: Pointer to the folio. 1242 * 1243 * These functions are safe to use under any of the following conditions: 1244 * - folio locked 1245 * - folio_test_lru false 1246 * - folio frozen (refcount of 0) 1247 * 1248 * Return: The lruvec this folio is on with its lock held and interrupts 1249 * disabled. 1250 */ 1251 struct lruvec *folio_lruvec_lock_irq(struct folio *folio) 1252 { 1253 struct lruvec *lruvec = folio_lruvec(folio); 1254 1255 spin_lock_irq(&lruvec->lru_lock); 1256 lruvec_memcg_debug(lruvec, folio); 1257 1258 return lruvec; 1259 } 1260 1261 /** 1262 * folio_lruvec_lock_irqsave - Lock the lruvec for a folio. 1263 * @folio: Pointer to the folio. 1264 * @flags: Pointer to irqsave flags. 1265 * 1266 * These functions are safe to use under any of the following conditions: 1267 * - folio locked 1268 * - folio_test_lru false 1269 * - folio frozen (refcount of 0) 1270 * 1271 * Return: The lruvec this folio is on with its lock held and interrupts 1272 * disabled. 
1273 */ 1274 struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio, 1275 unsigned long *flags) 1276 { 1277 struct lruvec *lruvec = folio_lruvec(folio); 1278 1279 spin_lock_irqsave(&lruvec->lru_lock, *flags); 1280 lruvec_memcg_debug(lruvec, folio); 1281 1282 return lruvec; 1283 } 1284 1285 /** 1286 * mem_cgroup_update_lru_size - account for adding or removing an lru page 1287 * @lruvec: mem_cgroup per zone lru vector 1288 * @lru: index of lru list the page is sitting on 1289 * @zid: zone id of the accounted pages 1290 * @nr_pages: positive when adding or negative when removing 1291 * 1292 * This function must be called under lru_lock, just before a page is added 1293 * to or just after a page is removed from an lru list. 1294 */ 1295 void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru, 1296 int zid, int nr_pages) 1297 { 1298 struct mem_cgroup_per_node *mz; 1299 unsigned long *lru_size; 1300 long size; 1301 1302 if (mem_cgroup_disabled()) 1303 return; 1304 1305 mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec); 1306 lru_size = &mz->lru_zone_size[zid][lru]; 1307 1308 if (nr_pages < 0) 1309 *lru_size += nr_pages; 1310 1311 size = *lru_size; 1312 if (WARN_ONCE(size < 0, 1313 "%s(%p, %d, %d): lru_size %ld\n", 1314 __func__, lruvec, lru, nr_pages, size)) { 1315 VM_BUG_ON(1); 1316 *lru_size = 0; 1317 } 1318 1319 if (nr_pages > 0) 1320 *lru_size += nr_pages; 1321 } 1322 1323 /** 1324 * mem_cgroup_margin - calculate chargeable space of a memory cgroup 1325 * @memcg: the memory cgroup 1326 * 1327 * Returns the maximum amount of memory @mem can be charged with, in 1328 * pages. 1329 */ 1330 static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg) 1331 { 1332 unsigned long margin = 0; 1333 unsigned long count; 1334 unsigned long limit; 1335 1336 count = page_counter_read(&memcg->memory); 1337 limit = READ_ONCE(memcg->memory.max); 1338 if (count < limit) 1339 margin = limit - count; 1340 1341 if (do_memsw_account()) { 1342 count = page_counter_read(&memcg->memsw); 1343 limit = READ_ONCE(memcg->memsw.max); 1344 if (count < limit) 1345 margin = min(margin, limit - count); 1346 else 1347 margin = 0; 1348 } 1349 1350 return margin; 1351 } 1352 1353 struct memory_stat { 1354 const char *name; 1355 unsigned int idx; 1356 }; 1357 1358 static const struct memory_stat memory_stats[] = { 1359 { "anon", NR_ANON_MAPPED }, 1360 { "file", NR_FILE_PAGES }, 1361 { "kernel", MEMCG_KMEM }, 1362 { "kernel_stack", NR_KERNEL_STACK_KB }, 1363 { "pagetables", NR_PAGETABLE }, 1364 { "sec_pagetables", NR_SECONDARY_PAGETABLE }, 1365 { "percpu", MEMCG_PERCPU_B }, 1366 { "sock", MEMCG_SOCK }, 1367 { "vmalloc", NR_VMALLOC }, 1368 { "shmem", NR_SHMEM }, 1369 #ifdef CONFIG_ZSWAP 1370 { "zswap", MEMCG_ZSWAP_B }, 1371 { "zswapped", MEMCG_ZSWAPPED }, 1372 { "zswap_incomp", MEMCG_ZSWAP_INCOMP }, 1373 #endif 1374 { "file_mapped", NR_FILE_MAPPED }, 1375 { "file_dirty", NR_FILE_DIRTY }, 1376 { "file_writeback", NR_WRITEBACK }, 1377 #ifdef CONFIG_SWAP 1378 { "swapcached", NR_SWAPCACHE }, 1379 #endif 1380 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 1381 { "anon_thp", NR_ANON_THPS }, 1382 { "file_thp", NR_FILE_THPS }, 1383 { "shmem_thp", NR_SHMEM_THPS }, 1384 #endif 1385 { "inactive_anon", NR_INACTIVE_ANON }, 1386 { "active_anon", NR_ACTIVE_ANON }, 1387 { "inactive_file", NR_INACTIVE_FILE }, 1388 { "active_file", NR_ACTIVE_FILE }, 1389 { "unevictable", NR_UNEVICTABLE }, 1390 { "slab_reclaimable", NR_SLAB_RECLAIMABLE_B }, 1391 { "slab_unreclaimable", NR_SLAB_UNRECLAIMABLE_B }, 1392 #ifdef 
static const struct memory_stat memory_stats[] = {
	{ "anon", NR_ANON_MAPPED },
	{ "file", NR_FILE_PAGES },
	{ "kernel", MEMCG_KMEM },
	{ "kernel_stack", NR_KERNEL_STACK_KB },
	{ "pagetables", NR_PAGETABLE },
	{ "sec_pagetables", NR_SECONDARY_PAGETABLE },
	{ "percpu", MEMCG_PERCPU_B },
	{ "sock", MEMCG_SOCK },
	{ "vmalloc", NR_VMALLOC },
	{ "shmem", NR_SHMEM },
#ifdef CONFIG_ZSWAP
	{ "zswap", MEMCG_ZSWAP_B },
	{ "zswapped", MEMCG_ZSWAPPED },
	{ "zswap_incomp", MEMCG_ZSWAP_INCOMP },
#endif
	{ "file_mapped", NR_FILE_MAPPED },
	{ "file_dirty", NR_FILE_DIRTY },
	{ "file_writeback", NR_WRITEBACK },
#ifdef CONFIG_SWAP
	{ "swapcached", NR_SWAPCACHE },
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	{ "anon_thp", NR_ANON_THPS },
	{ "file_thp", NR_FILE_THPS },
	{ "shmem_thp", NR_SHMEM_THPS },
#endif
	{ "inactive_anon", NR_INACTIVE_ANON },
	{ "active_anon", NR_ACTIVE_ANON },
	{ "inactive_file", NR_INACTIVE_FILE },
	{ "active_file", NR_ACTIVE_FILE },
	{ "unevictable", NR_UNEVICTABLE },
	{ "slab_reclaimable", NR_SLAB_RECLAIMABLE_B },
	{ "slab_unreclaimable", NR_SLAB_UNRECLAIMABLE_B },
#ifdef CONFIG_HUGETLB_PAGE
	{ "hugetlb", NR_HUGETLB },
#endif

	/* The memory events */
	{ "workingset_refault_anon", WORKINGSET_REFAULT_ANON },
	{ "workingset_refault_file", WORKINGSET_REFAULT_FILE },
	{ "workingset_activate_anon", WORKINGSET_ACTIVATE_ANON },
	{ "workingset_activate_file", WORKINGSET_ACTIVATE_FILE },
	{ "workingset_restore_anon", WORKINGSET_RESTORE_ANON },
	{ "workingset_restore_file", WORKINGSET_RESTORE_FILE },
	{ "workingset_nodereclaim", WORKINGSET_NODERECLAIM },

	{ "pgdemote_kswapd", PGDEMOTE_KSWAPD },
	{ "pgdemote_direct", PGDEMOTE_DIRECT },
	{ "pgdemote_khugepaged", PGDEMOTE_KHUGEPAGED },
	{ "pgdemote_proactive", PGDEMOTE_PROACTIVE },
	{ "pgsteal_kswapd", PGSTEAL_KSWAPD },
	{ "pgsteal_direct", PGSTEAL_DIRECT },
	{ "pgsteal_khugepaged", PGSTEAL_KHUGEPAGED },
	{ "pgsteal_proactive", PGSTEAL_PROACTIVE },
	{ "pgscan_kswapd", PGSCAN_KSWAPD },
	{ "pgscan_direct", PGSCAN_DIRECT },
	{ "pgscan_khugepaged", PGSCAN_KHUGEPAGED },
	{ "pgscan_proactive", PGSCAN_PROACTIVE },
	{ "pgrefill", PGREFILL },
#ifdef CONFIG_NUMA_BALANCING
	{ "pgpromote_success", PGPROMOTE_SUCCESS },
#endif
};

/* The actual unit of the state item, not the same as the output unit */
static int memcg_page_state_unit(int item)
{
	switch (item) {
	case MEMCG_PERCPU_B:
	case MEMCG_ZSWAP_B:
	case NR_SLAB_RECLAIMABLE_B:
	case NR_SLAB_UNRECLAIMABLE_B:
		return 1;
	case NR_KERNEL_STACK_KB:
		return SZ_1K;
	default:
		return PAGE_SIZE;
	}
}

1448 */ 1449 switch (item) { 1450 case WORKINGSET_REFAULT_ANON: 1451 case WORKINGSET_REFAULT_FILE: 1452 case WORKINGSET_ACTIVATE_ANON: 1453 case WORKINGSET_ACTIVATE_FILE: 1454 case WORKINGSET_RESTORE_ANON: 1455 case WORKINGSET_RESTORE_FILE: 1456 case WORKINGSET_NODERECLAIM: 1457 case PGDEMOTE_KSWAPD: 1458 case PGDEMOTE_DIRECT: 1459 case PGDEMOTE_KHUGEPAGED: 1460 case PGDEMOTE_PROACTIVE: 1461 case PGSTEAL_KSWAPD: 1462 case PGSTEAL_DIRECT: 1463 case PGSTEAL_KHUGEPAGED: 1464 case PGSTEAL_PROACTIVE: 1465 case PGSCAN_KSWAPD: 1466 case PGSCAN_DIRECT: 1467 case PGSCAN_KHUGEPAGED: 1468 case PGSCAN_PROACTIVE: 1469 case PGREFILL: 1470 #ifdef CONFIG_NUMA_BALANCING 1471 case PGPROMOTE_SUCCESS: 1472 #endif 1473 return 1; 1474 default: 1475 return memcg_page_state_unit(item); 1476 } 1477 } 1478 1479 unsigned long memcg_page_state_output(struct mem_cgroup *memcg, int item) 1480 { 1481 return memcg_page_state(memcg, item) * 1482 memcg_page_state_output_unit(item); 1483 } 1484 1485 #ifdef CONFIG_MEMCG_V1 1486 unsigned long memcg_page_state_local_output(struct mem_cgroup *memcg, int item) 1487 { 1488 return memcg_page_state_local(memcg, item) * 1489 memcg_page_state_output_unit(item); 1490 } 1491 #endif 1492 1493 #ifdef CONFIG_HUGETLB_PAGE 1494 static bool memcg_accounts_hugetlb(void) 1495 { 1496 return cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_HUGETLB_ACCOUNTING; 1497 } 1498 #else /* CONFIG_HUGETLB_PAGE */ 1499 static bool memcg_accounts_hugetlb(void) 1500 { 1501 return false; 1502 } 1503 #endif /* CONFIG_HUGETLB_PAGE */ 1504 1505 static void memcg_stat_format(struct mem_cgroup *memcg, struct seq_buf *s) 1506 { 1507 int i; 1508 1509 /* 1510 * Provide statistics on the state of the memory subsystem as 1511 * well as cumulative event counters that show past behavior. 1512 * 1513 * This list is ordered following a combination of these gradients: 1514 * 1) generic big picture -> specifics and details 1515 * 2) reflecting userspace activity -> reflecting kernel heuristics 1516 * 1517 * Current memory state: 1518 */ 1519 mem_cgroup_flush_stats(memcg); 1520 1521 for (i = 0; i < ARRAY_SIZE(memory_stats); i++) { 1522 u64 size; 1523 1524 #ifdef CONFIG_HUGETLB_PAGE 1525 if (unlikely(memory_stats[i].idx == NR_HUGETLB) && 1526 !memcg_accounts_hugetlb()) 1527 continue; 1528 #endif 1529 size = memcg_page_state_output(memcg, memory_stats[i].idx); 1530 seq_buf_printf(s, "%s %llu\n", memory_stats[i].name, size); 1531 1532 if (unlikely(memory_stats[i].idx == NR_SLAB_UNRECLAIMABLE_B)) { 1533 size += memcg_page_state_output(memcg, 1534 NR_SLAB_RECLAIMABLE_B); 1535 seq_buf_printf(s, "slab %llu\n", size); 1536 } 1537 } 1538 1539 /* Accumulated memory events */ 1540 seq_buf_printf(s, "pgscan %lu\n", 1541 memcg_page_state(memcg, PGSCAN_KSWAPD) + 1542 memcg_page_state(memcg, PGSCAN_DIRECT) + 1543 memcg_page_state(memcg, PGSCAN_PROACTIVE) + 1544 memcg_page_state(memcg, PGSCAN_KHUGEPAGED)); 1545 seq_buf_printf(s, "pgsteal %lu\n", 1546 memcg_page_state(memcg, PGSTEAL_KSWAPD) + 1547 memcg_page_state(memcg, PGSTEAL_DIRECT) + 1548 memcg_page_state(memcg, PGSTEAL_PROACTIVE) + 1549 memcg_page_state(memcg, PGSTEAL_KHUGEPAGED)); 1550 1551 for (i = 0; i < ARRAY_SIZE(memcg_vm_event_stat); i++) { 1552 #ifdef CONFIG_MEMCG_V1 1553 if (memcg_vm_event_stat[i] == PGPGIN || 1554 memcg_vm_event_stat[i] == PGPGOUT) 1555 continue; 1556 #endif 1557 seq_buf_printf(s, "%s %lu\n", 1558 vm_event_name(memcg_vm_event_stat[i]), 1559 memcg_events(memcg, memcg_vm_event_stat[i])); 1560 } 1561 } 1562 1563 static void memory_stat_format(struct mem_cgroup *memcg, struct 
unsigned long memcg_page_state_output(struct mem_cgroup *memcg, int item)
{
	return memcg_page_state(memcg, item) *
		memcg_page_state_output_unit(item);
}

#ifdef CONFIG_MEMCG_V1
unsigned long memcg_page_state_local_output(struct mem_cgroup *memcg, int item)
{
	return memcg_page_state_local(memcg, item) *
		memcg_page_state_output_unit(item);
}
#endif

#ifdef CONFIG_HUGETLB_PAGE
static bool memcg_accounts_hugetlb(void)
{
	return cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_HUGETLB_ACCOUNTING;
}
#else /* CONFIG_HUGETLB_PAGE */
static bool memcg_accounts_hugetlb(void)
{
	return false;
}
#endif /* CONFIG_HUGETLB_PAGE */

static void memcg_stat_format(struct mem_cgroup *memcg, struct seq_buf *s)
{
	int i;

	/*
	 * Provide statistics on the state of the memory subsystem as
	 * well as cumulative event counters that show past behavior.
	 *
	 * This list is ordered following a combination of these gradients:
	 * 1) generic big picture -> specifics and details
	 * 2) reflecting userspace activity -> reflecting kernel heuristics
	 *
	 * Current memory state:
	 */
	mem_cgroup_flush_stats(memcg);

	for (i = 0; i < ARRAY_SIZE(memory_stats); i++) {
		u64 size;

#ifdef CONFIG_HUGETLB_PAGE
		if (unlikely(memory_stats[i].idx == NR_HUGETLB) &&
		    !memcg_accounts_hugetlb())
			continue;
#endif
		size = memcg_page_state_output(memcg, memory_stats[i].idx);
		seq_buf_printf(s, "%s %llu\n", memory_stats[i].name, size);

		if (unlikely(memory_stats[i].idx == NR_SLAB_UNRECLAIMABLE_B)) {
			size += memcg_page_state_output(memcg,
							NR_SLAB_RECLAIMABLE_B);
			seq_buf_printf(s, "slab %llu\n", size);
		}
	}

	/* Accumulated memory events */
	seq_buf_printf(s, "pgscan %lu\n",
		       memcg_page_state(memcg, PGSCAN_KSWAPD) +
		       memcg_page_state(memcg, PGSCAN_DIRECT) +
		       memcg_page_state(memcg, PGSCAN_PROACTIVE) +
		       memcg_page_state(memcg, PGSCAN_KHUGEPAGED));
	seq_buf_printf(s, "pgsteal %lu\n",
		       memcg_page_state(memcg, PGSTEAL_KSWAPD) +
		       memcg_page_state(memcg, PGSTEAL_DIRECT) +
		       memcg_page_state(memcg, PGSTEAL_PROACTIVE) +
		       memcg_page_state(memcg, PGSTEAL_KHUGEPAGED));

	for (i = 0; i < ARRAY_SIZE(memcg_vm_event_stat); i++) {
#ifdef CONFIG_MEMCG_V1
		if (memcg_vm_event_stat[i] == PGPGIN ||
		    memcg_vm_event_stat[i] == PGPGOUT)
			continue;
#endif
		seq_buf_printf(s, "%s %lu\n",
			       vm_event_name(memcg_vm_event_stat[i]),
			       memcg_events(memcg, memcg_vm_event_stat[i]));
	}
}

static void memory_stat_format(struct mem_cgroup *memcg, struct seq_buf *s)
{
	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
		memcg_stat_format(memcg, s);
	else
		memcg1_stat_format(memcg, s);
	if (seq_buf_has_overflowed(s))
		pr_warn("%s: Warning, stat buffer overflow, please report\n", __func__);
}

/**
 * mem_cgroup_print_oom_context: Print OOM information relevant to
 * memory controller.
 * @memcg: The memory cgroup that went over limit
 * @p: Task that is going to be killed
 *
 * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
 * enabled
 */
void mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
{
	rcu_read_lock();

	if (memcg) {
		pr_cont(",oom_memcg=");
		pr_cont_cgroup_path(memcg->css.cgroup);
	} else
		pr_cont(",global_oom");
	if (p) {
		pr_cont(",task_memcg=");
		pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
	}
	rcu_read_unlock();
}

/**
 * mem_cgroup_print_oom_meminfo: Print OOM memory information relevant to
 * memory controller.
 * @memcg: The memory cgroup that went over limit
 */
void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
{
	/* Use a static buffer, as the caller holds oom_lock. */
	static char buf[SEQ_BUF_SIZE];
	struct seq_buf s;
	unsigned long memory_failcnt;

	lockdep_assert_held(&oom_lock);

	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
		memory_failcnt = atomic_long_read(&memcg->memory_events[MEMCG_MAX]);
	else
		memory_failcnt = memcg->memory.failcnt;

	pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n",
		K((u64)page_counter_read(&memcg->memory)),
		K((u64)READ_ONCE(memcg->memory.max)), memory_failcnt);
	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
		pr_info("swap: usage %llukB, limit %llukB, failcnt %lu\n",
			K((u64)page_counter_read(&memcg->swap)),
			K((u64)READ_ONCE(memcg->swap.max)),
			atomic_long_read(&memcg->memory_events[MEMCG_SWAP_MAX]));
#ifdef CONFIG_MEMCG_V1
	else {
		pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %lu\n",
			K((u64)page_counter_read(&memcg->memsw)),
			K((u64)memcg->memsw.max), memcg->memsw.failcnt);
		pr_info("kmem: usage %llukB, limit %llukB, failcnt %lu\n",
			K((u64)page_counter_read(&memcg->kmem)),
			K((u64)memcg->kmem.max), memcg->kmem.failcnt);
	}
#endif

	pr_info("Memory cgroup stats for ");
	pr_cont_cgroup_path(memcg->css.cgroup);
	pr_cont(":");
	seq_buf_init(&s, buf, SEQ_BUF_SIZE);
	memory_stat_format(memcg, &s);
	seq_buf_do_printk(&s, KERN_INFO);
}

1646 */ 1647 unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg) 1648 { 1649 unsigned long max = READ_ONCE(memcg->memory.max); 1650 1651 if (do_memsw_account()) { 1652 if (mem_cgroup_swappiness(memcg)) { 1653 /* Calculate swap excess capacity from memsw limit */ 1654 unsigned long swap = READ_ONCE(memcg->memsw.max) - max; 1655 1656 max += min(swap, (unsigned long)total_swap_pages); 1657 } 1658 } else { 1659 if (mem_cgroup_swappiness(memcg)) 1660 max += min(READ_ONCE(memcg->swap.max), 1661 (unsigned long)total_swap_pages); 1662 } 1663 return max; 1664 } 1665 1666 void __memcg_memory_event(struct mem_cgroup *memcg, 1667 enum memcg_memory_event event, bool allow_spinning) 1668 { 1669 bool swap_event = event == MEMCG_SWAP_HIGH || event == MEMCG_SWAP_MAX || 1670 event == MEMCG_SWAP_FAIL; 1671 1672 /* For now only MEMCG_MAX can happen with !allow_spinning context. */ 1673 VM_WARN_ON_ONCE(!allow_spinning && event != MEMCG_MAX); 1674 1675 atomic_long_inc(&memcg->memory_events_local[event]); 1676 if (!swap_event && allow_spinning) 1677 cgroup_file_notify(&memcg->events_local_file); 1678 1679 do { 1680 atomic_long_inc(&memcg->memory_events[event]); 1681 if (allow_spinning) { 1682 if (swap_event) 1683 cgroup_file_notify(&memcg->swap_events_file); 1684 else 1685 cgroup_file_notify(&memcg->events_file); 1686 } 1687 1688 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) 1689 break; 1690 if (cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_LOCAL_EVENTS) 1691 break; 1692 } while ((memcg = parent_mem_cgroup(memcg)) && 1693 !mem_cgroup_is_root(memcg)); 1694 } 1695 EXPORT_SYMBOL_GPL(__memcg_memory_event); 1696 1697 static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask, 1698 int order) 1699 { 1700 struct oom_control oc = { 1701 .zonelist = NULL, 1702 .nodemask = NULL, 1703 .memcg = memcg, 1704 .gfp_mask = gfp_mask, 1705 .order = order, 1706 }; 1707 bool ret = true; 1708 1709 if (mutex_lock_killable(&oom_lock)) 1710 return true; 1711 1712 if (mem_cgroup_margin(memcg) >= (1 << order)) 1713 goto unlock; 1714 1715 /* 1716 * A few threads which were not waiting at mutex_lock_killable() can 1717 * fail to bail out. Therefore, check again after holding oom_lock. 1718 */ 1719 ret = out_of_memory(&oc); 1720 1721 unlock: 1722 mutex_unlock(&oom_lock); 1723 return ret; 1724 } 1725 1726 /* 1727 * Returns true if successfully killed one or more processes. Though in some 1728 * corner cases it can return true even without killing any process. 1729 */ 1730 static bool mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order) 1731 { 1732 bool locked, ret; 1733 1734 if (order > PAGE_ALLOC_COSTLY_ORDER) 1735 return false; 1736 1737 memcg_memory_event(memcg, MEMCG_OOM); 1738 1739 if (!memcg1_oom_prepare(memcg, &locked)) 1740 return false; 1741 1742 ret = mem_cgroup_out_of_memory(memcg, mask, order); 1743 1744 memcg1_oom_finish(memcg, locked); 1745 1746 return ret; 1747 } 1748 1749 /** 1750 * mem_cgroup_get_oom_group - get a memory cgroup to clean up after OOM 1751 * @victim: task to be killed by the OOM killer 1752 * @oom_domain: memcg in case of memcg OOM, NULL in case of system-wide OOM 1753 * 1754 * Returns a pointer to a memory cgroup, which has to be cleaned up 1755 * by killing all belonging OOM-killable tasks. 1756 * 1757 * Caller has to call mem_cgroup_put() on the returned non-NULL memcg. 
1758 */ 1759 struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim, 1760 struct mem_cgroup *oom_domain) 1761 { 1762 struct mem_cgroup *oom_group = NULL; 1763 struct mem_cgroup *memcg; 1764 1765 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) 1766 return NULL; 1767 1768 if (!oom_domain) 1769 oom_domain = root_mem_cgroup; 1770 1771 rcu_read_lock(); 1772 1773 memcg = mem_cgroup_from_task(victim); 1774 if (mem_cgroup_is_root(memcg)) 1775 goto out; 1776 1777 /* 1778 * If the victim task has been asynchronously moved to a different 1779 * memory cgroup, we might end up killing tasks outside oom_domain. 1780 * In this case it's better to ignore memory.group.oom. 1781 */ 1782 if (unlikely(!mem_cgroup_is_descendant(memcg, oom_domain))) 1783 goto out; 1784 1785 /* 1786 * Traverse the memory cgroup hierarchy from the victim task's 1787 * cgroup up to the OOMing cgroup (or root) to find the 1788 * highest-level memory cgroup with oom.group set. 1789 */ 1790 for (; memcg; memcg = parent_mem_cgroup(memcg)) { 1791 if (READ_ONCE(memcg->oom_group)) 1792 oom_group = memcg; 1793 1794 if (memcg == oom_domain) 1795 break; 1796 } 1797 1798 if (oom_group) 1799 css_get(&oom_group->css); 1800 out: 1801 rcu_read_unlock(); 1802 1803 return oom_group; 1804 } 1805 1806 void mem_cgroup_print_oom_group(struct mem_cgroup *memcg) 1807 { 1808 pr_info("Tasks in "); 1809 pr_cont_cgroup_path(memcg->css.cgroup); 1810 pr_cont(" are going to be killed due to memory.oom.group set\n"); 1811 } 1812 1813 /* 1814 * The value of NR_MEMCG_STOCK is selected to keep the cached memcgs and their 1815 * nr_pages in a single cacheline. This may change in future. 1816 */ 1817 #define NR_MEMCG_STOCK 7 1818 #define FLUSHING_CACHED_CHARGE 0 1819 struct memcg_stock_pcp { 1820 local_trylock_t lock; 1821 uint8_t nr_pages[NR_MEMCG_STOCK]; 1822 struct mem_cgroup *cached[NR_MEMCG_STOCK]; 1823 1824 struct work_struct work; 1825 unsigned long flags; 1826 }; 1827 1828 static DEFINE_PER_CPU_ALIGNED(struct memcg_stock_pcp, memcg_stock) = { 1829 .lock = INIT_LOCAL_TRYLOCK(lock), 1830 }; 1831 1832 struct obj_stock_pcp { 1833 local_trylock_t lock; 1834 unsigned int nr_bytes; 1835 struct obj_cgroup *cached_objcg; 1836 struct pglist_data *cached_pgdat; 1837 int nr_slab_reclaimable_b; 1838 int nr_slab_unreclaimable_b; 1839 1840 struct work_struct work; 1841 unsigned long flags; 1842 }; 1843 1844 static DEFINE_PER_CPU_ALIGNED(struct obj_stock_pcp, obj_stock) = { 1845 .lock = INIT_LOCAL_TRYLOCK(lock), 1846 }; 1847 1848 static DEFINE_MUTEX(percpu_charge_mutex); 1849 1850 static void drain_obj_stock(struct obj_stock_pcp *stock); 1851 static bool obj_stock_flush_required(struct obj_stock_pcp *stock, 1852 struct mem_cgroup *root_memcg); 1853 1854 /** 1855 * consume_stock: Try to consume stocked charge on this cpu. 1856 * @memcg: memcg to consume from. 1857 * @nr_pages: how many pages to charge. 1858 * 1859 * Consume the cached charge if enough nr_pages are present otherwise return 1860 * failure. Also return failure for charge request larger than 1861 * MEMCG_CHARGE_BATCH or if the local lock is already taken. 1862 * 1863 * returns true if successful, false otherwise. 
1864 */ 1865 static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages) 1866 { 1867 struct memcg_stock_pcp *stock; 1868 uint8_t stock_pages; 1869 bool ret = false; 1870 int i; 1871 1872 if (nr_pages > MEMCG_CHARGE_BATCH || 1873 !local_trylock(&memcg_stock.lock)) 1874 return ret; 1875 1876 stock = this_cpu_ptr(&memcg_stock); 1877 1878 for (i = 0; i < NR_MEMCG_STOCK; ++i) { 1879 if (memcg != READ_ONCE(stock->cached[i])) 1880 continue; 1881 1882 stock_pages = READ_ONCE(stock->nr_pages[i]); 1883 if (stock_pages >= nr_pages) { 1884 WRITE_ONCE(stock->nr_pages[i], stock_pages - nr_pages); 1885 ret = true; 1886 } 1887 break; 1888 } 1889 1890 local_unlock(&memcg_stock.lock); 1891 1892 return ret; 1893 } 1894 1895 static void memcg_uncharge(struct mem_cgroup *memcg, unsigned int nr_pages) 1896 { 1897 page_counter_uncharge(&memcg->memory, nr_pages); 1898 if (do_memsw_account()) 1899 page_counter_uncharge(&memcg->memsw, nr_pages); 1900 } 1901 1902 /* 1903 * Returns stocks cached in percpu and reset cached information. 1904 */ 1905 static void drain_stock(struct memcg_stock_pcp *stock, int i) 1906 { 1907 struct mem_cgroup *old = READ_ONCE(stock->cached[i]); 1908 uint8_t stock_pages; 1909 1910 if (!old) 1911 return; 1912 1913 stock_pages = READ_ONCE(stock->nr_pages[i]); 1914 if (stock_pages) { 1915 memcg_uncharge(old, stock_pages); 1916 WRITE_ONCE(stock->nr_pages[i], 0); 1917 } 1918 1919 css_put(&old->css); 1920 WRITE_ONCE(stock->cached[i], NULL); 1921 } 1922 1923 static void drain_stock_fully(struct memcg_stock_pcp *stock) 1924 { 1925 int i; 1926 1927 for (i = 0; i < NR_MEMCG_STOCK; ++i) 1928 drain_stock(stock, i); 1929 } 1930 1931 static void drain_local_memcg_stock(struct work_struct *dummy) 1932 { 1933 struct memcg_stock_pcp *stock; 1934 1935 if (WARN_ONCE(!in_task(), "drain in non-task context")) 1936 return; 1937 1938 local_lock(&memcg_stock.lock); 1939 1940 stock = this_cpu_ptr(&memcg_stock); 1941 drain_stock_fully(stock); 1942 clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags); 1943 1944 local_unlock(&memcg_stock.lock); 1945 } 1946 1947 static void drain_local_obj_stock(struct work_struct *dummy) 1948 { 1949 struct obj_stock_pcp *stock; 1950 1951 if (WARN_ONCE(!in_task(), "drain in non-task context")) 1952 return; 1953 1954 local_lock(&obj_stock.lock); 1955 1956 stock = this_cpu_ptr(&obj_stock); 1957 drain_obj_stock(stock); 1958 clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags); 1959 1960 local_unlock(&obj_stock.lock); 1961 } 1962 1963 static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages) 1964 { 1965 struct memcg_stock_pcp *stock; 1966 struct mem_cgroup *cached; 1967 uint8_t stock_pages; 1968 bool success = false; 1969 int empty_slot = -1; 1970 int i; 1971 1972 /* 1973 * For now limit MEMCG_CHARGE_BATCH to 127 and less. In future if we 1974 * decide to increase it more than 127 then we will need more careful 1975 * handling of nr_pages[] in struct memcg_stock_pcp. 1976 */ 1977 BUILD_BUG_ON(MEMCG_CHARGE_BATCH > S8_MAX); 1978 1979 VM_WARN_ON_ONCE(mem_cgroup_is_root(memcg)); 1980 1981 if (nr_pages > MEMCG_CHARGE_BATCH || 1982 !local_trylock(&memcg_stock.lock)) { 1983 /* 1984 * In case of larger than batch refill or unlikely failure to 1985 * lock the percpu memcg_stock.lock, uncharge memcg directly. 
1986 */ 1987 memcg_uncharge(memcg, nr_pages); 1988 return; 1989 } 1990 1991 stock = this_cpu_ptr(&memcg_stock); 1992 for (i = 0; i < NR_MEMCG_STOCK; ++i) { 1993 cached = READ_ONCE(stock->cached[i]); 1994 if (!cached && empty_slot == -1) 1995 empty_slot = i; 1996 if (memcg == READ_ONCE(stock->cached[i])) { 1997 stock_pages = READ_ONCE(stock->nr_pages[i]) + nr_pages; 1998 WRITE_ONCE(stock->nr_pages[i], stock_pages); 1999 if (stock_pages > MEMCG_CHARGE_BATCH) 2000 drain_stock(stock, i); 2001 success = true; 2002 break; 2003 } 2004 } 2005 2006 if (!success) { 2007 i = empty_slot; 2008 if (i == -1) { 2009 i = get_random_u32_below(NR_MEMCG_STOCK); 2010 drain_stock(stock, i); 2011 } 2012 css_get(&memcg->css); 2013 WRITE_ONCE(stock->cached[i], memcg); 2014 WRITE_ONCE(stock->nr_pages[i], nr_pages); 2015 } 2016 2017 local_unlock(&memcg_stock.lock); 2018 } 2019 2020 static bool is_memcg_drain_needed(struct memcg_stock_pcp *stock, 2021 struct mem_cgroup *root_memcg) 2022 { 2023 struct mem_cgroup *memcg; 2024 bool flush = false; 2025 int i; 2026 2027 rcu_read_lock(); 2028 for (i = 0; i < NR_MEMCG_STOCK; ++i) { 2029 memcg = READ_ONCE(stock->cached[i]); 2030 if (!memcg) 2031 continue; 2032 2033 if (READ_ONCE(stock->nr_pages[i]) && 2034 mem_cgroup_is_descendant(memcg, root_memcg)) { 2035 flush = true; 2036 break; 2037 } 2038 } 2039 rcu_read_unlock(); 2040 return flush; 2041 } 2042 2043 static void schedule_drain_work(int cpu, struct work_struct *work) 2044 { 2045 /* 2046 * Protect the housekeeping cpumask read and the work enqueue together 2047 * in the same RCU critical section so that a later cpuset isolated 2048 * partition update only needs to wait for an RCU GP and flush the 2049 * pending work on newly isolated CPUs. 2050 */ 2051 guard(rcu)(); 2052 if (!cpu_is_isolated(cpu)) 2053 queue_work_on(cpu, memcg_wq, work); 2054 } 2055 2056 /* 2057 * Drains all per-CPU charge caches for the given root_memcg and the subtree 2058 * of the hierarchy under it. 2059 */ 2060 void drain_all_stock(struct mem_cgroup *root_memcg) 2061 { 2062 int cpu, curcpu; 2063 2064 /* If someone's already draining, avoid adding more workers. */ 2065 if (!mutex_trylock(&percpu_charge_mutex)) 2066 return; 2067 /* 2068 * Notify other cpus that system-wide "drain" is running. 2069 * We do not care about races with cpu hotplug because cpu down 2070 * as well as workers from this path always operate on the local 2071 * per-cpu data. CPU up doesn't touch memcg_stock at all.
2072 */ 2073 migrate_disable(); 2074 curcpu = smp_processor_id(); 2075 for_each_online_cpu(cpu) { 2076 struct memcg_stock_pcp *memcg_st = &per_cpu(memcg_stock, cpu); 2077 struct obj_stock_pcp *obj_st = &per_cpu(obj_stock, cpu); 2078 2079 if (!test_bit(FLUSHING_CACHED_CHARGE, &memcg_st->flags) && 2080 is_memcg_drain_needed(memcg_st, root_memcg) && 2081 !test_and_set_bit(FLUSHING_CACHED_CHARGE, 2082 &memcg_st->flags)) { 2083 if (cpu == curcpu) 2084 drain_local_memcg_stock(&memcg_st->work); 2085 else 2086 schedule_drain_work(cpu, &memcg_st->work); 2087 } 2088 2089 if (!test_bit(FLUSHING_CACHED_CHARGE, &obj_st->flags) && 2090 obj_stock_flush_required(obj_st, root_memcg) && 2091 !test_and_set_bit(FLUSHING_CACHED_CHARGE, 2092 &obj_st->flags)) { 2093 if (cpu == curcpu) 2094 drain_local_obj_stock(&obj_st->work); 2095 else 2096 schedule_drain_work(cpu, &obj_st->work); 2097 } 2098 } 2099 migrate_enable(); 2100 mutex_unlock(&percpu_charge_mutex); 2101 } 2102 2103 static int memcg_hotplug_cpu_dead(unsigned int cpu) 2104 { 2105 /* no need for the local lock */ 2106 drain_obj_stock(&per_cpu(obj_stock, cpu)); 2107 drain_stock_fully(&per_cpu(memcg_stock, cpu)); 2108 2109 return 0; 2110 } 2111 2112 static unsigned long reclaim_high(struct mem_cgroup *memcg, 2113 unsigned int nr_pages, 2114 gfp_t gfp_mask) 2115 { 2116 unsigned long nr_reclaimed = 0; 2117 2118 do { 2119 unsigned long pflags; 2120 2121 if (page_counter_read(&memcg->memory) <= 2122 READ_ONCE(memcg->memory.high)) 2123 continue; 2124 2125 memcg_memory_event(memcg, MEMCG_HIGH); 2126 2127 psi_memstall_enter(&pflags); 2128 nr_reclaimed += try_to_free_mem_cgroup_pages(memcg, nr_pages, 2129 gfp_mask, 2130 MEMCG_RECLAIM_MAY_SWAP, 2131 NULL); 2132 psi_memstall_leave(&pflags); 2133 } while ((memcg = parent_mem_cgroup(memcg)) && 2134 !mem_cgroup_is_root(memcg)); 2135 2136 return nr_reclaimed; 2137 } 2138 2139 static void high_work_func(struct work_struct *work) 2140 { 2141 struct mem_cgroup *memcg; 2142 2143 memcg = container_of(work, struct mem_cgroup, high_work); 2144 reclaim_high(memcg, MEMCG_CHARGE_BATCH, GFP_KERNEL); 2145 } 2146 2147 /* 2148 * Clamp the maximum sleep time per allocation batch to 2 seconds. This is 2149 * enough to still cause a significant slowdown in most cases, while still 2150 * allowing diagnostics and tracing to proceed without becoming stuck. 2151 */ 2152 #define MEMCG_MAX_HIGH_DELAY_JIFFIES (2UL*HZ) 2153 2154 /* 2155 * When calculating the delay, we use these either side of the exponentiation to 2156 * maintain precision and scale to a reasonable number of jiffies (see the table 2157 * below. 2158 * 2159 * - MEMCG_DELAY_PRECISION_SHIFT: Extra precision bits while translating the 2160 * overage ratio to a delay. 2161 * - MEMCG_DELAY_SCALING_SHIFT: The number of bits to scale down the 2162 * proposed penalty in order to reduce to a reasonable number of jiffies, and 2163 * to produce a reasonable delay curve. 2164 * 2165 * MEMCG_DELAY_SCALING_SHIFT just happens to be a number that produces a 2166 * reasonable delay curve compared to precision-adjusted overage, not 2167 * penalising heavily at first, but still making sure that growth beyond the 2168 * limit penalises misbehaviour cgroups by slowing them down exponentially. 
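 *
 * As a rough sketch of the arithmetic in calculate_overage() and
 * calculate_high_delay() below (assuming HZ=1000 and a full
 * MEMCG_CHARGE_BATCH worth of pages being charged):
 *
 *	overage = ((usage - high) << MEMCG_DELAY_PRECISION_SHIFT) / high;
 *	penalty = (overage * overage * HZ) >> (MEMCG_DELAY_PRECISION_SHIFT +
 *						MEMCG_DELAY_SCALING_SHIFT);
 *
 * so being 1% over the limit gives overage ~= 10485 and a penalty of about
 * 6 jiffies (6ms), which matches the first non-zero row of the table below.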
For 2169 * example, with a high of 100 megabytes: 2170 * 2171 * +-------+------------------------+ 2172 * | usage | time to allocate in ms | 2173 * +-------+------------------------+ 2174 * | 100M | 0 | 2175 * | 101M | 6 | 2176 * | 102M | 25 | 2177 * | 103M | 57 | 2178 * | 104M | 102 | 2179 * | 105M | 159 | 2180 * | 106M | 230 | 2181 * | 107M | 313 | 2182 * | 108M | 409 | 2183 * | 109M | 518 | 2184 * | 110M | 639 | 2185 * | 111M | 774 | 2186 * | 112M | 921 | 2187 * | 113M | 1081 | 2188 * | 114M | 1254 | 2189 * | 115M | 1439 | 2190 * | 116M | 1638 | 2191 * | 117M | 1849 | 2192 * | 118M | 2000 | 2193 * | 119M | 2000 | 2194 * | 120M | 2000 | 2195 * +-------+------------------------+ 2196 */ 2197 #define MEMCG_DELAY_PRECISION_SHIFT 20 2198 #define MEMCG_DELAY_SCALING_SHIFT 14 2199 2200 static u64 calculate_overage(unsigned long usage, unsigned long high) 2201 { 2202 u64 overage; 2203 2204 if (usage <= high) 2205 return 0; 2206 2207 /* 2208 * Prevent division by 0 in overage calculation by acting as if 2209 * it was a threshold of 1 page 2210 */ 2211 high = max(high, 1UL); 2212 2213 overage = usage - high; 2214 overage <<= MEMCG_DELAY_PRECISION_SHIFT; 2215 return div64_u64(overage, high); 2216 } 2217 2218 static u64 mem_find_max_overage(struct mem_cgroup *memcg) 2219 { 2220 u64 overage, max_overage = 0; 2221 2222 do { 2223 overage = calculate_overage(page_counter_read(&memcg->memory), 2224 READ_ONCE(memcg->memory.high)); 2225 max_overage = max(overage, max_overage); 2226 } while ((memcg = parent_mem_cgroup(memcg)) && 2227 !mem_cgroup_is_root(memcg)); 2228 2229 return max_overage; 2230 } 2231 2232 static u64 swap_find_max_overage(struct mem_cgroup *memcg) 2233 { 2234 u64 overage, max_overage = 0; 2235 2236 do { 2237 overage = calculate_overage(page_counter_read(&memcg->swap), 2238 READ_ONCE(memcg->swap.high)); 2239 if (overage) 2240 memcg_memory_event(memcg, MEMCG_SWAP_HIGH); 2241 max_overage = max(overage, max_overage); 2242 } while ((memcg = parent_mem_cgroup(memcg)) && 2243 !mem_cgroup_is_root(memcg)); 2244 2245 return max_overage; 2246 } 2247 2248 /* 2249 * Get the number of jiffies that we should penalise a mischievous cgroup which 2250 * is exceeding its memory.high by checking both it and its ancestors. 2251 */ 2252 static unsigned long calculate_high_delay(struct mem_cgroup *memcg, 2253 unsigned int nr_pages, 2254 u64 max_overage) 2255 { 2256 unsigned long penalty_jiffies; 2257 2258 if (!max_overage) 2259 return 0; 2260 2261 /* 2262 * We use overage compared to memory.high to calculate the number of 2263 * jiffies to sleep (penalty_jiffies). Ideally this value should be 2264 * fairly lenient on small overages, and increasingly harsh when the 2265 * memcg in question makes it clear that it has no intention of stopping 2266 * its crazy behaviour, so we exponentially increase the delay based on 2267 * overage amount. 2268 */ 2269 penalty_jiffies = max_overage * max_overage * HZ; 2270 penalty_jiffies >>= MEMCG_DELAY_PRECISION_SHIFT; 2271 penalty_jiffies >>= MEMCG_DELAY_SCALING_SHIFT; 2272 2273 /* 2274 * Factor in the task's own contribution to the overage, such that four 2275 * N-sized allocations are throttled approximately the same as one 2276 * 4N-sized allocation. 2277 * 2278 * MEMCG_CHARGE_BATCH pages is nominal, so work out how much smaller or 2279 * larger the current charge patch is than that. 2280 */ 2281 return penalty_jiffies * nr_pages / MEMCG_CHARGE_BATCH; 2282 } 2283 2284 /* 2285 * Reclaims memory over the high limit. 
Called directly from 2286 * try_charge() (context permitting), as well as from the userland 2287 * return path where reclaim is always able to block. 2288 */ 2289 void __mem_cgroup_handle_over_high(gfp_t gfp_mask) 2290 { 2291 unsigned long penalty_jiffies; 2292 unsigned long pflags; 2293 unsigned long nr_reclaimed; 2294 unsigned int nr_pages = current->memcg_nr_pages_over_high; 2295 int nr_retries = MAX_RECLAIM_RETRIES; 2296 struct mem_cgroup *memcg; 2297 bool in_retry = false; 2298 2299 memcg = get_mem_cgroup_from_mm(current->mm); 2300 current->memcg_nr_pages_over_high = 0; 2301 2302 retry_reclaim: 2303 /* 2304 * Bail if the task is already exiting. Unlike memory.max, 2305 * memory.high enforcement isn't as strict, and there is no 2306 * OOM killer involved, which means the excess could already 2307 * be much bigger (and still growing) than it could for 2308 * memory.max; the dying task could get stuck in fruitless 2309 * reclaim for a long time, which isn't desirable. 2310 */ 2311 if (task_is_dying()) 2312 goto out; 2313 2314 /* 2315 * The allocating task should reclaim at least the batch size, but for 2316 * subsequent retries we only want to do what's necessary to prevent oom 2317 * or breaching resource isolation. 2318 * 2319 * This is distinct from memory.max or page allocator behaviour because 2320 * memory.high is currently batched, whereas memory.max and the page 2321 * allocator run every time an allocation is made. 2322 */ 2323 nr_reclaimed = reclaim_high(memcg, 2324 in_retry ? SWAP_CLUSTER_MAX : nr_pages, 2325 gfp_mask); 2326 2327 /* 2328 * memory.high is breached and reclaim is unable to keep up. Throttle 2329 * allocators proactively to slow down excessive growth. 2330 */ 2331 penalty_jiffies = calculate_high_delay(memcg, nr_pages, 2332 mem_find_max_overage(memcg)); 2333 2334 penalty_jiffies += calculate_high_delay(memcg, nr_pages, 2335 swap_find_max_overage(memcg)); 2336 2337 /* 2338 * Clamp the max delay per usermode return so as to still keep the 2339 * application moving forwards and also permit diagnostics, albeit 2340 * extremely slowly. 2341 */ 2342 penalty_jiffies = min(penalty_jiffies, MEMCG_MAX_HIGH_DELAY_JIFFIES); 2343 2344 /* 2345 * Don't sleep if the amount of jiffies this memcg owes us is so low 2346 * that it's not even worth doing, in an attempt to be nice to those who 2347 * go only a small amount over their memory.high value and maybe haven't 2348 * been aggressively reclaimed enough yet. 2349 */ 2350 if (penalty_jiffies <= HZ / 100) 2351 goto out; 2352 2353 /* 2354 * If reclaim is making forward progress but we're still over 2355 * memory.high, we want to encourage that rather than doing allocator 2356 * throttling. 2357 */ 2358 if (nr_reclaimed || nr_retries--) { 2359 in_retry = true; 2360 goto retry_reclaim; 2361 } 2362 2363 /* 2364 * Reclaim didn't manage to push usage below the limit, slow 2365 * this allocating task down. 2366 * 2367 * If we exit early, we're guaranteed to die (since 2368 * schedule_timeout_killable sets TASK_KILLABLE). This means we don't 2369 * need to account for any ill-begotten jiffies to pay them off later. 
2370 */ 2371 psi_memstall_enter(&pflags); 2372 schedule_timeout_killable(penalty_jiffies); 2373 psi_memstall_leave(&pflags); 2374 2375 out: 2376 css_put(&memcg->css); 2377 } 2378 2379 static int try_charge_memcg(struct mem_cgroup *memcg, gfp_t gfp_mask, 2380 unsigned int nr_pages) 2381 { 2382 unsigned int batch = max(MEMCG_CHARGE_BATCH, nr_pages); 2383 int nr_retries = MAX_RECLAIM_RETRIES; 2384 struct mem_cgroup *mem_over_limit; 2385 struct page_counter *counter; 2386 unsigned long nr_reclaimed; 2387 bool passed_oom = false; 2388 unsigned int reclaim_options = MEMCG_RECLAIM_MAY_SWAP; 2389 bool drained = false; 2390 bool raised_max_event = false; 2391 unsigned long pflags; 2392 bool allow_spinning = gfpflags_allow_spinning(gfp_mask); 2393 2394 retry: 2395 if (consume_stock(memcg, nr_pages)) 2396 return 0; 2397 2398 if (!allow_spinning) 2399 /* Avoid the refill and flush of the older stock */ 2400 batch = nr_pages; 2401 2402 if (!do_memsw_account() || 2403 page_counter_try_charge(&memcg->memsw, batch, &counter)) { 2404 if (page_counter_try_charge(&memcg->memory, batch, &counter)) 2405 goto done_restock; 2406 if (do_memsw_account()) 2407 page_counter_uncharge(&memcg->memsw, batch); 2408 mem_over_limit = mem_cgroup_from_counter(counter, memory); 2409 } else { 2410 mem_over_limit = mem_cgroup_from_counter(counter, memsw); 2411 reclaim_options &= ~MEMCG_RECLAIM_MAY_SWAP; 2412 } 2413 2414 if (batch > nr_pages) { 2415 batch = nr_pages; 2416 goto retry; 2417 } 2418 2419 /* 2420 * Prevent unbounded recursion when reclaim operations need to 2421 * allocate memory. This might exceed the limits temporarily, 2422 * but we prefer facilitating memory reclaim and getting back 2423 * under the limit over triggering OOM kills in these cases. 2424 */ 2425 if (unlikely(current->flags & PF_MEMALLOC)) 2426 goto force; 2427 2428 if (unlikely(task_in_memcg_oom(current))) 2429 goto nomem; 2430 2431 if (!gfpflags_allow_blocking(gfp_mask)) 2432 goto nomem; 2433 2434 __memcg_memory_event(mem_over_limit, MEMCG_MAX, allow_spinning); 2435 raised_max_event = true; 2436 2437 psi_memstall_enter(&pflags); 2438 nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages, 2439 gfp_mask, reclaim_options, NULL); 2440 psi_memstall_leave(&pflags); 2441 2442 if (mem_cgroup_margin(mem_over_limit) >= nr_pages) 2443 goto retry; 2444 2445 if (!drained) { 2446 drain_all_stock(mem_over_limit); 2447 drained = true; 2448 goto retry; 2449 } 2450 2451 if (gfp_mask & __GFP_NORETRY) 2452 goto nomem; 2453 /* 2454 * Even though the limit is exceeded at this point, reclaim 2455 * may have been able to free some pages. Retry the charge 2456 * before killing the task. 2457 * 2458 * Only for regular pages, though: huge pages are rather 2459 * unlikely to succeed so close to the limit, and we fall back 2460 * to regular pages anyway in case of failure. 2461 */ 2462 if (nr_reclaimed && nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER)) 2463 goto retry; 2464 2465 if (nr_retries--) 2466 goto retry; 2467 2468 if (gfp_mask & __GFP_RETRY_MAYFAIL) 2469 goto nomem; 2470 2471 /* Avoid endless loop for tasks bypassed by the oom killer */ 2472 if (passed_oom && task_is_dying()) 2473 goto nomem; 2474 2475 /* 2476 * keep retrying as long as the memcg oom killer is able to make 2477 * a forward progress or bypass the charge if the oom killer 2478 * couldn't make any progress. 
2479 */ 2480 if (mem_cgroup_oom(mem_over_limit, gfp_mask, 2481 get_order(nr_pages * PAGE_SIZE))) { 2482 passed_oom = true; 2483 nr_retries = MAX_RECLAIM_RETRIES; 2484 goto retry; 2485 } 2486 nomem: 2487 /* 2488 * Memcg doesn't have a dedicated reserve for atomic 2489 * allocations. But like the global atomic pool, we need to 2490 * put the burden of reclaim on regular allocation requests 2491 * and let these go through as privileged allocations. 2492 */ 2493 if (!(gfp_mask & (__GFP_NOFAIL | __GFP_HIGH))) 2494 return -ENOMEM; 2495 force: 2496 /* 2497 * If the allocation has to be enforced, don't forget to raise 2498 * a MEMCG_MAX event. 2499 */ 2500 if (!raised_max_event) 2501 __memcg_memory_event(mem_over_limit, MEMCG_MAX, allow_spinning); 2502 2503 /* 2504 * The allocation either can't fail or will lead to more memory 2505 * being freed very soon. Allow memory usage go over the limit 2506 * temporarily by force charging it. 2507 */ 2508 page_counter_charge(&memcg->memory, nr_pages); 2509 if (do_memsw_account()) 2510 page_counter_charge(&memcg->memsw, nr_pages); 2511 2512 return 0; 2513 2514 done_restock: 2515 if (batch > nr_pages) 2516 refill_stock(memcg, batch - nr_pages); 2517 2518 /* 2519 * If the hierarchy is above the normal consumption range, schedule 2520 * reclaim on returning to userland. We can perform reclaim here 2521 * if __GFP_RECLAIM but let's always punt for simplicity and so that 2522 * GFP_KERNEL can consistently be used during reclaim. @memcg is 2523 * not recorded as it most likely matches current's and won't 2524 * change in the meantime. As high limit is checked again before 2525 * reclaim, the cost of mismatch is negligible. 2526 */ 2527 do { 2528 bool mem_high, swap_high; 2529 2530 mem_high = page_counter_read(&memcg->memory) > 2531 READ_ONCE(memcg->memory.high); 2532 swap_high = page_counter_read(&memcg->swap) > 2533 READ_ONCE(memcg->swap.high); 2534 2535 /* Don't bother a random interrupted task */ 2536 if (!in_task()) { 2537 if (mem_high) { 2538 schedule_work(&memcg->high_work); 2539 break; 2540 } 2541 continue; 2542 } 2543 2544 if (mem_high || swap_high) { 2545 /* 2546 * The allocating tasks in this cgroup will need to do 2547 * reclaim or be throttled to prevent further growth 2548 * of the memory or swap footprints. 2549 * 2550 * Target some best-effort fairness between the tasks, 2551 * and distribute reclaim work and delay penalties 2552 * based on how much each task is actually allocating. 2553 */ 2554 current->memcg_nr_pages_over_high += batch; 2555 set_notify_resume(current); 2556 break; 2557 } 2558 } while ((memcg = parent_mem_cgroup(memcg))); 2559 2560 /* 2561 * Reclaim is set up above to be called from the userland 2562 * return path. But also attempt synchronous reclaim to avoid 2563 * excessive overrun while the task is still inside the 2564 * kernel. If this is successful, the return path will see it 2565 * when it rechecks the overage and simply bail out. 
2566 */ 2567 if (current->memcg_nr_pages_over_high > MEMCG_CHARGE_BATCH && 2568 !(current->flags & PF_MEMALLOC) && 2569 gfpflags_allow_blocking(gfp_mask)) 2570 __mem_cgroup_handle_over_high(gfp_mask); 2571 return 0; 2572 } 2573 2574 static inline int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask, 2575 unsigned int nr_pages) 2576 { 2577 if (mem_cgroup_is_root(memcg)) 2578 return 0; 2579 2580 return try_charge_memcg(memcg, gfp_mask, nr_pages); 2581 } 2582 2583 static void commit_charge(struct folio *folio, struct mem_cgroup *memcg) 2584 { 2585 VM_BUG_ON_FOLIO(folio_memcg_charged(folio), folio); 2586 /* 2587 * Any of the following ensures page's memcg stability: 2588 * 2589 * - the page lock 2590 * - LRU isolation 2591 * - exclusive reference 2592 */ 2593 folio->memcg_data = (unsigned long)memcg; 2594 } 2595 2596 #ifdef CONFIG_MEMCG_NMI_SAFETY_REQUIRES_ATOMIC 2597 static inline void account_slab_nmi_safe(struct mem_cgroup *memcg, 2598 struct pglist_data *pgdat, 2599 enum node_stat_item idx, int nr) 2600 { 2601 struct lruvec *lruvec; 2602 2603 if (likely(!in_nmi())) { 2604 lruvec = mem_cgroup_lruvec(memcg, pgdat); 2605 mod_memcg_lruvec_state(lruvec, idx, nr); 2606 } else { 2607 struct mem_cgroup_per_node *pn = memcg->nodeinfo[pgdat->node_id]; 2608 2609 /* preemption is disabled in_nmi(). */ 2610 css_rstat_updated(&memcg->css, smp_processor_id()); 2611 if (idx == NR_SLAB_RECLAIMABLE_B) 2612 atomic_add(nr, &pn->slab_reclaimable); 2613 else 2614 atomic_add(nr, &pn->slab_unreclaimable); 2615 } 2616 } 2617 #else 2618 static inline void account_slab_nmi_safe(struct mem_cgroup *memcg, 2619 struct pglist_data *pgdat, 2620 enum node_stat_item idx, int nr) 2621 { 2622 struct lruvec *lruvec; 2623 2624 lruvec = mem_cgroup_lruvec(memcg, pgdat); 2625 mod_memcg_lruvec_state(lruvec, idx, nr); 2626 } 2627 #endif 2628 2629 static inline void mod_objcg_mlstate(struct obj_cgroup *objcg, 2630 struct pglist_data *pgdat, 2631 enum node_stat_item idx, int nr) 2632 { 2633 struct mem_cgroup *memcg; 2634 2635 rcu_read_lock(); 2636 memcg = obj_cgroup_memcg(objcg); 2637 account_slab_nmi_safe(memcg, pgdat, idx, nr); 2638 rcu_read_unlock(); 2639 } 2640 2641 static __always_inline 2642 struct mem_cgroup *mem_cgroup_from_obj_slab(struct slab *slab, void *p) 2643 { 2644 /* 2645 * Slab objects are accounted individually, not per-page. 2646 * Memcg membership data for each individual object is saved in 2647 * slab->obj_exts. 2648 */ 2649 unsigned long obj_exts; 2650 struct slabobj_ext *obj_ext; 2651 unsigned int off; 2652 2653 obj_exts = slab_obj_exts(slab); 2654 if (!obj_exts) 2655 return NULL; 2656 2657 get_slab_obj_exts(obj_exts); 2658 off = obj_to_index(slab->slab_cache, slab, p); 2659 obj_ext = slab_obj_ext(slab, obj_exts, off); 2660 if (obj_ext->objcg) { 2661 struct obj_cgroup *objcg = obj_ext->objcg; 2662 2663 put_slab_obj_exts(obj_exts); 2664 return obj_cgroup_memcg(objcg); 2665 } 2666 put_slab_obj_exts(obj_exts); 2667 2668 return NULL; 2669 } 2670 2671 /* 2672 * Returns a pointer to the memory cgroup to which the kernel object is charged. 2673 * It is not suitable for objects allocated using vmalloc(). 2674 * 2675 * A passed kernel object must be a slab object or a generic kernel page. 2676 * 2677 * The caller must ensure the memcg lifetime, e.g. by taking rcu_read_lock(), 2678 * cgroup_mutex, etc. 
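 *
 * A minimal usage sketch (illustrative):
 *
 *	rcu_read_lock();
 *	memcg = mem_cgroup_from_virt(ptr);
 *	if (memcg)
 *		... inspect memcg, e.g. read a vmstat counter ...
 *	rcu_read_unlock();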
2679 */ 2680 struct mem_cgroup *mem_cgroup_from_virt(void *p) 2681 { 2682 struct slab *slab; 2683 2684 if (mem_cgroup_disabled()) 2685 return NULL; 2686 2687 slab = virt_to_slab(p); 2688 if (slab) 2689 return mem_cgroup_from_obj_slab(slab, p); 2690 return folio_memcg_check(virt_to_folio(p)); 2691 } 2692 2693 static struct obj_cgroup *__get_obj_cgroup_from_memcg(struct mem_cgroup *memcg) 2694 { 2695 struct obj_cgroup *objcg = NULL; 2696 2697 for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg)) { 2698 objcg = rcu_dereference(memcg->objcg); 2699 if (likely(objcg && obj_cgroup_tryget(objcg))) 2700 break; 2701 objcg = NULL; 2702 } 2703 return objcg; 2704 } 2705 2706 static struct obj_cgroup *current_objcg_update(void) 2707 { 2708 struct mem_cgroup *memcg; 2709 struct obj_cgroup *old, *objcg = NULL; 2710 2711 do { 2712 /* Atomically drop the update bit. */ 2713 old = xchg(¤t->objcg, NULL); 2714 if (old) { 2715 old = (struct obj_cgroup *) 2716 ((unsigned long)old & ~CURRENT_OBJCG_UPDATE_FLAG); 2717 obj_cgroup_put(old); 2718 2719 old = NULL; 2720 } 2721 2722 /* If new objcg is NULL, no reason for the second atomic update. */ 2723 if (!current->mm || (current->flags & PF_KTHREAD)) 2724 return NULL; 2725 2726 /* 2727 * Release the objcg pointer from the previous iteration, 2728 * if try_cmpxcg() below fails. 2729 */ 2730 if (unlikely(objcg)) { 2731 obj_cgroup_put(objcg); 2732 objcg = NULL; 2733 } 2734 2735 /* 2736 * Obtain the new objcg pointer. The current task can be 2737 * asynchronously moved to another memcg and the previous 2738 * memcg can be offlined. So let's get the memcg pointer 2739 * and try get a reference to objcg under a rcu read lock. 2740 */ 2741 2742 rcu_read_lock(); 2743 memcg = mem_cgroup_from_task(current); 2744 objcg = __get_obj_cgroup_from_memcg(memcg); 2745 rcu_read_unlock(); 2746 2747 /* 2748 * Try set up a new objcg pointer atomically. If it 2749 * fails, it means the update flag was set concurrently, so 2750 * the whole procedure should be repeated. 2751 */ 2752 } while (!try_cmpxchg(¤t->objcg, &old, objcg)); 2753 2754 return objcg; 2755 } 2756 2757 __always_inline struct obj_cgroup *current_obj_cgroup(void) 2758 { 2759 struct mem_cgroup *memcg; 2760 struct obj_cgroup *objcg; 2761 2762 if (IS_ENABLED(CONFIG_MEMCG_NMI_UNSAFE) && in_nmi()) 2763 return NULL; 2764 2765 if (in_task()) { 2766 memcg = current->active_memcg; 2767 if (unlikely(memcg)) 2768 goto from_memcg; 2769 2770 objcg = READ_ONCE(current->objcg); 2771 if (unlikely((unsigned long)objcg & CURRENT_OBJCG_UPDATE_FLAG)) 2772 objcg = current_objcg_update(); 2773 /* 2774 * Objcg reference is kept by the task, so it's safe 2775 * to use the objcg by the current task. 2776 */ 2777 return objcg; 2778 } 2779 2780 memcg = this_cpu_read(int_active_memcg); 2781 if (unlikely(memcg)) 2782 goto from_memcg; 2783 2784 return NULL; 2785 2786 from_memcg: 2787 objcg = NULL; 2788 for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg)) { 2789 /* 2790 * Memcg pointer is protected by scope (see set_active_memcg()) 2791 * and is pinning the corresponding objcg, so objcg can't go 2792 * away and can be used within the scope without any additional 2793 * protection. 
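 *
 * The scope referred to above is the usual pairing (illustrative):
 *
 *	old = set_active_memcg(memcg);
 *	... accounted allocations here see memcg via current_obj_cgroup() ...
 *	set_active_memcg(old);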
2794 */ 2795 objcg = rcu_dereference_check(memcg->objcg, 1); 2796 if (likely(objcg)) 2797 break; 2798 } 2799 2800 return objcg; 2801 } 2802 2803 struct obj_cgroup *get_obj_cgroup_from_folio(struct folio *folio) 2804 { 2805 struct obj_cgroup *objcg; 2806 2807 if (!memcg_kmem_online()) 2808 return NULL; 2809 2810 if (folio_memcg_kmem(folio)) { 2811 objcg = __folio_objcg(folio); 2812 obj_cgroup_get(objcg); 2813 } else { 2814 struct mem_cgroup *memcg; 2815 2816 rcu_read_lock(); 2817 memcg = __folio_memcg(folio); 2818 if (memcg) 2819 objcg = __get_obj_cgroup_from_memcg(memcg); 2820 else 2821 objcg = NULL; 2822 rcu_read_unlock(); 2823 } 2824 return objcg; 2825 } 2826 2827 #ifdef CONFIG_MEMCG_NMI_SAFETY_REQUIRES_ATOMIC 2828 static inline void account_kmem_nmi_safe(struct mem_cgroup *memcg, int val) 2829 { 2830 if (likely(!in_nmi())) { 2831 mod_memcg_state(memcg, MEMCG_KMEM, val); 2832 } else { 2833 /* preemption is disabled in_nmi(). */ 2834 css_rstat_updated(&memcg->css, smp_processor_id()); 2835 atomic_add(val, &memcg->kmem_stat); 2836 } 2837 } 2838 #else 2839 static inline void account_kmem_nmi_safe(struct mem_cgroup *memcg, int val) 2840 { 2841 mod_memcg_state(memcg, MEMCG_KMEM, val); 2842 } 2843 #endif 2844 2845 /* 2846 * obj_cgroup_uncharge_pages: uncharge a number of kernel pages from a objcg 2847 * @objcg: object cgroup to uncharge 2848 * @nr_pages: number of pages to uncharge 2849 */ 2850 static void obj_cgroup_uncharge_pages(struct obj_cgroup *objcg, 2851 unsigned int nr_pages) 2852 { 2853 struct mem_cgroup *memcg; 2854 2855 memcg = get_mem_cgroup_from_objcg(objcg); 2856 2857 account_kmem_nmi_safe(memcg, -nr_pages); 2858 memcg1_account_kmem(memcg, -nr_pages); 2859 if (!mem_cgroup_is_root(memcg)) 2860 refill_stock(memcg, nr_pages); 2861 2862 css_put(&memcg->css); 2863 } 2864 2865 /* 2866 * obj_cgroup_charge_pages: charge a number of kernel pages to a objcg 2867 * @objcg: object cgroup to charge 2868 * @gfp: reclaim mode 2869 * @nr_pages: number of pages to charge 2870 * 2871 * Returns 0 on success, an error code on failure. 2872 */ 2873 static int obj_cgroup_charge_pages(struct obj_cgroup *objcg, gfp_t gfp, 2874 unsigned int nr_pages) 2875 { 2876 struct mem_cgroup *memcg; 2877 int ret; 2878 2879 memcg = get_mem_cgroup_from_objcg(objcg); 2880 2881 ret = try_charge_memcg(memcg, gfp, nr_pages); 2882 if (ret) 2883 goto out; 2884 2885 account_kmem_nmi_safe(memcg, nr_pages); 2886 memcg1_account_kmem(memcg, nr_pages); 2887 out: 2888 css_put(&memcg->css); 2889 2890 return ret; 2891 } 2892 2893 static struct obj_cgroup *page_objcg(const struct page *page) 2894 { 2895 unsigned long memcg_data = page->memcg_data; 2896 2897 if (mem_cgroup_disabled() || !memcg_data) 2898 return NULL; 2899 2900 VM_BUG_ON_PAGE((memcg_data & OBJEXTS_FLAGS_MASK) != MEMCG_DATA_KMEM, 2901 page); 2902 return (struct obj_cgroup *)(memcg_data - MEMCG_DATA_KMEM); 2903 } 2904 2905 static void page_set_objcg(struct page *page, const struct obj_cgroup *objcg) 2906 { 2907 page->memcg_data = (unsigned long)objcg | MEMCG_DATA_KMEM; 2908 } 2909 2910 /** 2911 * __memcg_kmem_charge_page: charge a kmem page to the current memory cgroup 2912 * @page: page to charge 2913 * @gfp: reclaim mode 2914 * @order: allocation order 2915 * 2916 * Returns 0 on success, an error code on failure. 
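 *
 * Pairs with __memcg_kmem_uncharge_page(). A rough usage sketch from an
 * allocator's point of view (illustrative, assuming a __GFP_ACCOUNT request):
 *
 *	if (__memcg_kmem_charge_page(page, gfp, order))
 *		goto fail;
 *	...
 *	__memcg_kmem_uncharge_page(page, order);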
2917 */ 2918 int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order) 2919 { 2920 struct obj_cgroup *objcg; 2921 int ret = 0; 2922 2923 objcg = current_obj_cgroup(); 2924 if (objcg) { 2925 ret = obj_cgroup_charge_pages(objcg, gfp, 1 << order); 2926 if (!ret) { 2927 obj_cgroup_get(objcg); 2928 page_set_objcg(page, objcg); 2929 return 0; 2930 } 2931 } 2932 return ret; 2933 } 2934 2935 /** 2936 * __memcg_kmem_uncharge_page: uncharge a kmem page 2937 * @page: page to uncharge 2938 * @order: allocation order 2939 */ 2940 void __memcg_kmem_uncharge_page(struct page *page, int order) 2941 { 2942 struct obj_cgroup *objcg = page_objcg(page); 2943 unsigned int nr_pages = 1 << order; 2944 2945 if (!objcg) 2946 return; 2947 2948 obj_cgroup_uncharge_pages(objcg, nr_pages); 2949 page->memcg_data = 0; 2950 obj_cgroup_put(objcg); 2951 } 2952 2953 static struct obj_stock_pcp *trylock_stock(void) 2954 { 2955 if (local_trylock(&obj_stock.lock)) 2956 return this_cpu_ptr(&obj_stock); 2957 2958 return NULL; 2959 } 2960 2961 static void unlock_stock(struct obj_stock_pcp *stock) 2962 { 2963 if (stock) 2964 local_unlock(&obj_stock.lock); 2965 } 2966 2967 /* Call after __refill_obj_stock() to ensure stock->cached_objg == objcg */ 2968 static void __account_obj_stock(struct obj_cgroup *objcg, 2969 struct obj_stock_pcp *stock, int nr, 2970 struct pglist_data *pgdat, enum node_stat_item idx) 2971 { 2972 int *bytes; 2973 2974 if (!stock || READ_ONCE(stock->cached_objcg) != objcg) 2975 goto direct; 2976 2977 /* 2978 * Save vmstat data in stock and skip vmstat array update unless 2979 * accumulating over a page of vmstat data or when pgdat changes. 2980 */ 2981 if (stock->cached_pgdat != pgdat) { 2982 /* Flush the existing cached vmstat data */ 2983 struct pglist_data *oldpg = stock->cached_pgdat; 2984 2985 if (stock->nr_slab_reclaimable_b) { 2986 mod_objcg_mlstate(objcg, oldpg, NR_SLAB_RECLAIMABLE_B, 2987 stock->nr_slab_reclaimable_b); 2988 stock->nr_slab_reclaimable_b = 0; 2989 } 2990 if (stock->nr_slab_unreclaimable_b) { 2991 mod_objcg_mlstate(objcg, oldpg, NR_SLAB_UNRECLAIMABLE_B, 2992 stock->nr_slab_unreclaimable_b); 2993 stock->nr_slab_unreclaimable_b = 0; 2994 } 2995 stock->cached_pgdat = pgdat; 2996 } 2997 2998 bytes = (idx == NR_SLAB_RECLAIMABLE_B) ? &stock->nr_slab_reclaimable_b 2999 : &stock->nr_slab_unreclaimable_b; 3000 /* 3001 * Even for large object >= PAGE_SIZE, the vmstat data will still be 3002 * cached locally at least once before pushing it out. 
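 *
 * For example, a stream of small (say 64-byte) slab charges against the same
 * pgdat accumulates in *bytes below and is only folded into the node vmstat
 * via mod_objcg_mlstate() once the sum exceeds PAGE_SIZE, or once the pgdat
 * or objcg changes, which keeps per-object accounting cheap.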
3003 */ 3004 if (!*bytes) { 3005 *bytes = nr; 3006 nr = 0; 3007 } else { 3008 *bytes += nr; 3009 if (abs(*bytes) > PAGE_SIZE) { 3010 nr = *bytes; 3011 *bytes = 0; 3012 } else { 3013 nr = 0; 3014 } 3015 } 3016 direct: 3017 if (nr) 3018 mod_objcg_mlstate(objcg, pgdat, idx, nr); 3019 } 3020 3021 static bool __consume_obj_stock(struct obj_cgroup *objcg, 3022 struct obj_stock_pcp *stock, 3023 unsigned int nr_bytes) 3024 { 3025 if (objcg == READ_ONCE(stock->cached_objcg) && 3026 stock->nr_bytes >= nr_bytes) { 3027 stock->nr_bytes -= nr_bytes; 3028 return true; 3029 } 3030 3031 return false; 3032 } 3033 3034 static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes) 3035 { 3036 struct obj_stock_pcp *stock; 3037 bool ret = false; 3038 3039 stock = trylock_stock(); 3040 if (!stock) 3041 return ret; 3042 3043 ret = __consume_obj_stock(objcg, stock, nr_bytes); 3044 unlock_stock(stock); 3045 3046 return ret; 3047 } 3048 3049 static void drain_obj_stock(struct obj_stock_pcp *stock) 3050 { 3051 struct obj_cgroup *old = READ_ONCE(stock->cached_objcg); 3052 3053 if (!old) 3054 return; 3055 3056 if (stock->nr_bytes) { 3057 unsigned int nr_pages = stock->nr_bytes >> PAGE_SHIFT; 3058 unsigned int nr_bytes = stock->nr_bytes & (PAGE_SIZE - 1); 3059 3060 if (nr_pages) { 3061 struct mem_cgroup *memcg; 3062 3063 memcg = get_mem_cgroup_from_objcg(old); 3064 3065 mod_memcg_state(memcg, MEMCG_KMEM, -nr_pages); 3066 memcg1_account_kmem(memcg, -nr_pages); 3067 if (!mem_cgroup_is_root(memcg)) 3068 memcg_uncharge(memcg, nr_pages); 3069 3070 css_put(&memcg->css); 3071 } 3072 3073 /* 3074 * The leftover is flushed to the centralized per-memcg value. 3075 * On the next attempt to refill obj stock it will be moved 3076 * to a per-cpu stock (probably, on an other CPU), see 3077 * refill_obj_stock(). 3078 * 3079 * How often it's flushed is a trade-off between the memory 3080 * limit enforcement accuracy and potential CPU contention, 3081 * so it might be changed in the future. 
3082 */ 3083 atomic_add(nr_bytes, &old->nr_charged_bytes); 3084 stock->nr_bytes = 0; 3085 } 3086 3087 /* 3088 * Flush the vmstat data in current stock 3089 */ 3090 if (stock->nr_slab_reclaimable_b || stock->nr_slab_unreclaimable_b) { 3091 if (stock->nr_slab_reclaimable_b) { 3092 mod_objcg_mlstate(old, stock->cached_pgdat, 3093 NR_SLAB_RECLAIMABLE_B, 3094 stock->nr_slab_reclaimable_b); 3095 stock->nr_slab_reclaimable_b = 0; 3096 } 3097 if (stock->nr_slab_unreclaimable_b) { 3098 mod_objcg_mlstate(old, stock->cached_pgdat, 3099 NR_SLAB_UNRECLAIMABLE_B, 3100 stock->nr_slab_unreclaimable_b); 3101 stock->nr_slab_unreclaimable_b = 0; 3102 } 3103 stock->cached_pgdat = NULL; 3104 } 3105 3106 WRITE_ONCE(stock->cached_objcg, NULL); 3107 obj_cgroup_put(old); 3108 } 3109 3110 static bool obj_stock_flush_required(struct obj_stock_pcp *stock, 3111 struct mem_cgroup *root_memcg) 3112 { 3113 struct obj_cgroup *objcg = READ_ONCE(stock->cached_objcg); 3114 struct mem_cgroup *memcg; 3115 bool flush = false; 3116 3117 rcu_read_lock(); 3118 if (objcg) { 3119 memcg = obj_cgroup_memcg(objcg); 3120 if (memcg && mem_cgroup_is_descendant(memcg, root_memcg)) 3121 flush = true; 3122 } 3123 rcu_read_unlock(); 3124 3125 return flush; 3126 } 3127 3128 static void __refill_obj_stock(struct obj_cgroup *objcg, 3129 struct obj_stock_pcp *stock, 3130 unsigned int nr_bytes, 3131 bool allow_uncharge) 3132 { 3133 unsigned int nr_pages = 0; 3134 3135 if (!stock) { 3136 nr_pages = nr_bytes >> PAGE_SHIFT; 3137 nr_bytes = nr_bytes & (PAGE_SIZE - 1); 3138 atomic_add(nr_bytes, &objcg->nr_charged_bytes); 3139 goto out; 3140 } 3141 3142 if (READ_ONCE(stock->cached_objcg) != objcg) { /* reset if necessary */ 3143 drain_obj_stock(stock); 3144 obj_cgroup_get(objcg); 3145 stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes) 3146 ? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0; 3147 WRITE_ONCE(stock->cached_objcg, objcg); 3148 3149 allow_uncharge = true; /* Allow uncharge when objcg changes */ 3150 } 3151 stock->nr_bytes += nr_bytes; 3152 3153 if (allow_uncharge && (stock->nr_bytes > PAGE_SIZE)) { 3154 nr_pages = stock->nr_bytes >> PAGE_SHIFT; 3155 stock->nr_bytes &= (PAGE_SIZE - 1); 3156 } 3157 3158 out: 3159 if (nr_pages) 3160 obj_cgroup_uncharge_pages(objcg, nr_pages); 3161 } 3162 3163 static void refill_obj_stock(struct obj_cgroup *objcg, 3164 unsigned int nr_bytes, 3165 bool allow_uncharge) 3166 { 3167 struct obj_stock_pcp *stock = trylock_stock(); 3168 __refill_obj_stock(objcg, stock, nr_bytes, allow_uncharge); 3169 unlock_stock(stock); 3170 } 3171 3172 static int __obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, 3173 size_t size, size_t *remainder) 3174 { 3175 size_t charge_size; 3176 int ret; 3177 3178 charge_size = PAGE_ALIGN(size); 3179 ret = obj_cgroup_charge_pages(objcg, gfp, charge_size >> PAGE_SHIFT); 3180 if (!ret) 3181 *remainder = charge_size - size; 3182 3183 return ret; 3184 } 3185 3186 int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size) 3187 { 3188 size_t remainder; 3189 int ret; 3190 3191 if (likely(consume_obj_stock(objcg, size))) 3192 return 0; 3193 3194 /* 3195 * In theory, objcg->nr_charged_bytes can have enough 3196 * pre-charged bytes to satisfy the allocation. However, 3197 * flushing objcg->nr_charged_bytes requires two atomic 3198 * operations, and objcg->nr_charged_bytes can't be big. 3199 * The shared objcg->nr_charged_bytes can also become a 3200 * performance bottleneck if all tasks of the same memcg are 3201 * trying to update it. 
So it's better to ignore it and try 3202 * grab some new pages. The stock's nr_bytes will be flushed to 3203 * objcg->nr_charged_bytes later on when objcg changes. 3204 * 3205 * The stock's nr_bytes may contain enough pre-charged bytes 3206 * to allow one less page from being charged, but we can't rely 3207 * on the pre-charged bytes not being changed outside of 3208 * consume_obj_stock() or refill_obj_stock(). So ignore those 3209 * pre-charged bytes as well when charging pages. To avoid a 3210 * page uncharge right after a page charge, we set the 3211 * allow_uncharge flag to false when calling refill_obj_stock() 3212 * to temporarily allow the pre-charged bytes to exceed the page 3213 * size limit. The maximum reachable value of the pre-charged 3214 * bytes is (sizeof(object) + PAGE_SIZE - 2) if there is no data 3215 * race. 3216 */ 3217 ret = __obj_cgroup_charge(objcg, gfp, size, &remainder); 3218 if (!ret && remainder) 3219 refill_obj_stock(objcg, remainder, false); 3220 3221 return ret; 3222 } 3223 3224 void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size) 3225 { 3226 refill_obj_stock(objcg, size, true); 3227 } 3228 3229 static inline size_t obj_full_size(struct kmem_cache *s) 3230 { 3231 /* 3232 * For each accounted object there is an extra space which is used 3233 * to store obj_cgroup membership. Charge it too. 3234 */ 3235 return s->size + sizeof(struct obj_cgroup *); 3236 } 3237 3238 bool __memcg_slab_post_alloc_hook(struct kmem_cache *s, struct list_lru *lru, 3239 gfp_t flags, size_t size, void **p) 3240 { 3241 size_t obj_size = obj_full_size(s); 3242 struct obj_cgroup *objcg; 3243 struct slab *slab; 3244 unsigned long off; 3245 size_t i; 3246 3247 /* 3248 * The obtained objcg pointer is safe to use within the current scope, 3249 * defined by current task or set_active_memcg() pair. 3250 * obj_cgroup_get() is used to get a permanent reference. 3251 */ 3252 objcg = current_obj_cgroup(); 3253 if (!objcg) 3254 return true; 3255 3256 /* 3257 * slab_alloc_node() avoids the NULL check, so we might be called with a 3258 * single NULL object. kmem_cache_alloc_bulk() aborts if it can't fill 3259 * the whole requested size. 
3260 * return success as there's nothing to free back 3261 */ 3262 if (unlikely(*p == NULL)) 3263 return true; 3264 3265 flags &= gfp_allowed_mask; 3266 3267 if (lru) { 3268 int ret; 3269 struct mem_cgroup *memcg; 3270 3271 memcg = get_mem_cgroup_from_objcg(objcg); 3272 ret = memcg_list_lru_alloc(memcg, lru, flags); 3273 css_put(&memcg->css); 3274 3275 if (ret) 3276 return false; 3277 } 3278 3279 for (i = 0; i < size; i++) { 3280 unsigned long obj_exts; 3281 struct slabobj_ext *obj_ext; 3282 struct obj_stock_pcp *stock; 3283 3284 slab = virt_to_slab(p[i]); 3285 3286 if (!slab_obj_exts(slab) && 3287 alloc_slab_obj_exts(slab, s, flags, false)) { 3288 continue; 3289 } 3290 3291 /* 3292 * if we fail and size is 1, memcg_alloc_abort_single() will 3293 * just free the object, which is ok as we have not assigned 3294 * objcg to its obj_ext yet 3295 * 3296 * for larger sizes, kmem_cache_free_bulk() will uncharge 3297 * any objects that were already charged and obj_ext assigned 3298 * 3299 * TODO: we could batch this until slab_pgdat(slab) changes 3300 * between iterations, with a more complicated undo 3301 */ 3302 stock = trylock_stock(); 3303 if (!stock || !__consume_obj_stock(objcg, stock, obj_size)) { 3304 size_t remainder; 3305 3306 unlock_stock(stock); 3307 if (__obj_cgroup_charge(objcg, flags, obj_size, &remainder)) 3308 return false; 3309 stock = trylock_stock(); 3310 if (remainder) 3311 __refill_obj_stock(objcg, stock, remainder, false); 3312 } 3313 __account_obj_stock(objcg, stock, obj_size, 3314 slab_pgdat(slab), cache_vmstat_idx(s)); 3315 unlock_stock(stock); 3316 3317 obj_exts = slab_obj_exts(slab); 3318 get_slab_obj_exts(obj_exts); 3319 off = obj_to_index(s, slab, p[i]); 3320 obj_ext = slab_obj_ext(slab, obj_exts, off); 3321 obj_cgroup_get(objcg); 3322 obj_ext->objcg = objcg; 3323 put_slab_obj_exts(obj_exts); 3324 } 3325 3326 return true; 3327 } 3328 3329 void __memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab, 3330 void **p, int objects, unsigned long obj_exts) 3331 { 3332 size_t obj_size = obj_full_size(s); 3333 3334 for (int i = 0; i < objects; i++) { 3335 struct obj_cgroup *objcg; 3336 struct slabobj_ext *obj_ext; 3337 struct obj_stock_pcp *stock; 3338 unsigned int off; 3339 3340 off = obj_to_index(s, slab, p[i]); 3341 obj_ext = slab_obj_ext(slab, obj_exts, off); 3342 objcg = obj_ext->objcg; 3343 if (!objcg) 3344 continue; 3345 3346 obj_ext->objcg = NULL; 3347 3348 stock = trylock_stock(); 3349 __refill_obj_stock(objcg, stock, obj_size, true); 3350 __account_obj_stock(objcg, stock, -obj_size, 3351 slab_pgdat(slab), cache_vmstat_idx(s)); 3352 unlock_stock(stock); 3353 3354 obj_cgroup_put(objcg); 3355 } 3356 } 3357 3358 /* 3359 * The objcg is only set on the first page, so transfer it to all the 3360 * other pages. 
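 *
 * For example, splitting an order-3 kmem page copies the objcg pointer to
 * pages 1..7 and takes 7 extra references via obj_cgroup_get_many(), so each
 * of the resulting pages can later be uncharged independently.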
3361 */ 3362 void split_page_memcg(struct page *page, unsigned order) 3363 { 3364 struct obj_cgroup *objcg = page_objcg(page); 3365 unsigned int i, nr = 1 << order; 3366 3367 if (!objcg) 3368 return; 3369 3370 for (i = 1; i < nr; i++) 3371 page_set_objcg(&page[i], objcg); 3372 3373 obj_cgroup_get_many(objcg, nr - 1); 3374 } 3375 3376 void folio_split_memcg_refs(struct folio *folio, unsigned old_order, 3377 unsigned new_order) 3378 { 3379 unsigned new_refs; 3380 3381 if (mem_cgroup_disabled() || !folio_memcg_charged(folio)) 3382 return; 3383 3384 new_refs = (1 << (old_order - new_order)) - 1; 3385 css_get_many(&__folio_memcg(folio)->css, new_refs); 3386 } 3387 3388 static int memcg_online_kmem(struct mem_cgroup *memcg) 3389 { 3390 struct obj_cgroup *objcg; 3391 3392 if (mem_cgroup_kmem_disabled()) 3393 return 0; 3394 3395 if (unlikely(mem_cgroup_is_root(memcg))) 3396 return 0; 3397 3398 objcg = obj_cgroup_alloc(); 3399 if (!objcg) 3400 return -ENOMEM; 3401 3402 objcg->memcg = memcg; 3403 rcu_assign_pointer(memcg->objcg, objcg); 3404 obj_cgroup_get(objcg); 3405 memcg->orig_objcg = objcg; 3406 3407 static_branch_enable(&memcg_kmem_online_key); 3408 3409 memcg->kmemcg_id = memcg->id.id; 3410 3411 return 0; 3412 } 3413 3414 static void memcg_offline_kmem(struct mem_cgroup *memcg) 3415 { 3416 struct mem_cgroup *parent; 3417 3418 if (mem_cgroup_kmem_disabled()) 3419 return; 3420 3421 if (unlikely(mem_cgroup_is_root(memcg))) 3422 return; 3423 3424 parent = parent_mem_cgroup(memcg); 3425 if (!parent) 3426 parent = root_mem_cgroup; 3427 3428 memcg_reparent_list_lrus(memcg, parent); 3429 3430 /* 3431 * Objcg's reparenting must be after list_lru's, make sure list_lru 3432 * helpers won't use parent's list_lru until child is drained. 3433 */ 3434 memcg_reparent_objcgs(memcg, parent); 3435 } 3436 3437 #ifdef CONFIG_CGROUP_WRITEBACK 3438 3439 #include <trace/events/writeback.h> 3440 3441 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp) 3442 { 3443 return wb_domain_init(&memcg->cgwb_domain, gfp); 3444 } 3445 3446 static void memcg_wb_domain_exit(struct mem_cgroup *memcg) 3447 { 3448 wb_domain_exit(&memcg->cgwb_domain); 3449 } 3450 3451 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg) 3452 { 3453 wb_domain_size_changed(&memcg->cgwb_domain); 3454 } 3455 3456 struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb) 3457 { 3458 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css); 3459 3460 if (!memcg->css.parent) 3461 return NULL; 3462 3463 return &memcg->cgwb_domain; 3464 } 3465 3466 /** 3467 * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg 3468 * @wb: bdi_writeback in question 3469 * @pfilepages: out parameter for number of file pages 3470 * @pheadroom: out parameter for number of allocatable pages according to memcg 3471 * @pdirty: out parameter for number of dirty pages 3472 * @pwriteback: out parameter for number of pages under writeback 3473 * 3474 * Determine the numbers of file, headroom, dirty, and writeback pages in 3475 * @wb's memcg. File, dirty and writeback are self-explanatory. Headroom 3476 * is a bit more involved. 3477 * 3478 * A memcg's headroom is "min(max, high) - used". In the hierarchy, the 3479 * headroom is calculated as the lowest headroom of itself and the 3480 * ancestors. Note that this doesn't consider the actual amount of 3481 * available memory in the system. The caller should further cap 3482 * *@pheadroom accordingly. 
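 *
 * For instance (illustrative numbers): a memcg with memory.high=200M and
 * 150M in use, below a parent with memory.max=1G and 900M in use, yields
 * min(50M, 100M), so *@pheadroom is reported as 50M.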
3483 */ 3484 void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages, 3485 unsigned long *pheadroom, unsigned long *pdirty, 3486 unsigned long *pwriteback) 3487 { 3488 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css); 3489 struct mem_cgroup *parent; 3490 3491 mem_cgroup_flush_stats_ratelimited(memcg); 3492 3493 *pdirty = memcg_page_state(memcg, NR_FILE_DIRTY); 3494 *pwriteback = memcg_page_state(memcg, NR_WRITEBACK); 3495 *pfilepages = memcg_page_state(memcg, NR_INACTIVE_FILE) + 3496 memcg_page_state(memcg, NR_ACTIVE_FILE); 3497 3498 *pheadroom = PAGE_COUNTER_MAX; 3499 while ((parent = parent_mem_cgroup(memcg))) { 3500 unsigned long ceiling = min(READ_ONCE(memcg->memory.max), 3501 READ_ONCE(memcg->memory.high)); 3502 unsigned long used = page_counter_read(&memcg->memory); 3503 3504 *pheadroom = min(*pheadroom, ceiling - min(ceiling, used)); 3505 memcg = parent; 3506 } 3507 } 3508 3509 /* 3510 * Foreign dirty flushing 3511 * 3512 * There's an inherent mismatch between memcg and writeback. The former 3513 * tracks ownership per-page while the latter per-inode. This was a 3514 * deliberate design decision because honoring per-page ownership in the 3515 * writeback path is complicated, may lead to higher CPU and IO overheads 3516 * and deemed unnecessary given that write-sharing an inode across 3517 * different cgroups isn't a common use-case. 3518 * 3519 * Combined with inode majority-writer ownership switching, this works well 3520 * enough in most cases but there are some pathological cases. For 3521 * example, let's say there are two cgroups A and B which keep writing to 3522 * different but confined parts of the same inode. B owns the inode and 3523 * A's memory is limited far below B's. A's dirty ratio can rise enough to 3524 * trigger balance_dirty_pages() sleeps but B's can be low enough to avoid 3525 * triggering background writeback. A will be slowed down without a way to 3526 * make writeback of the dirty pages happen. 3527 * 3528 * Conditions like the above can lead to a cgroup getting repeatedly and 3529 * severely throttled after making some progress after each 3530 * dirty_expire_interval while the underlying IO device is almost 3531 * completely idle. 3532 * 3533 * Solving this problem completely requires matching the ownership tracking 3534 * granularities between memcg and writeback in either direction. However, 3535 * the more egregious behaviors can be avoided by simply remembering the 3536 * most recent foreign dirtying events and initiating remote flushes on 3537 * them when local writeback isn't enough to keep the memory clean enough. 3538 * 3539 * The following two functions implement such mechanism. When a foreign 3540 * page - a page whose memcg and writeback ownerships don't match - is 3541 * dirtied, mem_cgroup_track_foreign_dirty() records the inode owning 3542 * bdi_writeback on the page owning memcg. When balance_dirty_pages() 3543 * decides that the memcg needs to sleep due to high dirty ratio, it calls 3544 * mem_cgroup_flush_foreign() which queues writeback on the recorded 3545 * foreign bdi_writebacks which haven't expired. Both the numbers of 3546 * recorded bdi_writebacks and concurrent in-flight foreign writebacks are 3547 * limited to MEMCG_CGWB_FRN_CNT. 3548 * 3549 * The mechanism only remembers IDs and doesn't hold any object references. 3550 * As being wrong occasionally doesn't matter, updates and accesses to the 3551 * records are lockless and racy. 
3552 */ 3553 void mem_cgroup_track_foreign_dirty_slowpath(struct folio *folio, 3554 struct bdi_writeback *wb) 3555 { 3556 struct mem_cgroup *memcg = folio_memcg(folio); 3557 struct memcg_cgwb_frn *frn; 3558 u64 now = get_jiffies_64(); 3559 u64 oldest_at = now; 3560 int oldest = -1; 3561 int i; 3562 3563 trace_track_foreign_dirty(folio, wb); 3564 3565 /* 3566 * Pick the slot to use. If there is already a slot for @wb, keep 3567 * using it. If not replace the oldest one which isn't being 3568 * written out. 3569 */ 3570 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) { 3571 frn = &memcg->cgwb_frn[i]; 3572 if (frn->bdi_id == wb->bdi->id && 3573 frn->memcg_id == wb->memcg_css->id) 3574 break; 3575 if (time_before64(frn->at, oldest_at) && 3576 atomic_read(&frn->done.cnt) == 1) { 3577 oldest = i; 3578 oldest_at = frn->at; 3579 } 3580 } 3581 3582 if (i < MEMCG_CGWB_FRN_CNT) { 3583 /* 3584 * Re-using an existing one. Update timestamp lazily to 3585 * avoid making the cacheline hot. We want them to be 3586 * reasonably up-to-date and significantly shorter than 3587 * dirty_expire_interval as that's what expires the record. 3588 * Use the shorter of 1s and dirty_expire_interval / 8. 3589 */ 3590 unsigned long update_intv = 3591 min_t(unsigned long, HZ, 3592 msecs_to_jiffies(dirty_expire_interval * 10) / 8); 3593 3594 if (time_before64(frn->at, now - update_intv)) 3595 frn->at = now; 3596 } else if (oldest >= 0) { 3597 /* replace the oldest free one */ 3598 frn = &memcg->cgwb_frn[oldest]; 3599 frn->bdi_id = wb->bdi->id; 3600 frn->memcg_id = wb->memcg_css->id; 3601 frn->at = now; 3602 } 3603 } 3604 3605 /* issue foreign writeback flushes for recorded foreign dirtying events */ 3606 void mem_cgroup_flush_foreign(struct bdi_writeback *wb) 3607 { 3608 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css); 3609 unsigned long intv = msecs_to_jiffies(dirty_expire_interval * 10); 3610 u64 now = jiffies_64; 3611 int i; 3612 3613 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) { 3614 struct memcg_cgwb_frn *frn = &memcg->cgwb_frn[i]; 3615 3616 /* 3617 * If the record is older than dirty_expire_interval, 3618 * writeback on it has already started. No need to kick it 3619 * off again. Also, don't start a new one if there's 3620 * already one in flight. 3621 */ 3622 if (time_after64(frn->at, now - intv) && 3623 atomic_read(&frn->done.cnt) == 1) { 3624 frn->at = 0; 3625 trace_flush_foreign(wb, frn->bdi_id, frn->memcg_id); 3626 cgroup_writeback_by_id(frn->bdi_id, frn->memcg_id, 3627 WB_REASON_FOREIGN_FLUSH, 3628 &frn->done); 3629 } 3630 } 3631 } 3632 3633 #else /* CONFIG_CGROUP_WRITEBACK */ 3634 3635 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp) 3636 { 3637 return 0; 3638 } 3639 3640 static void memcg_wb_domain_exit(struct mem_cgroup *memcg) 3641 { 3642 } 3643 3644 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg) 3645 { 3646 } 3647 3648 #endif /* CONFIG_CGROUP_WRITEBACK */ 3649 3650 /* 3651 * Private memory cgroup IDR 3652 * 3653 * Swap-out records and page cache shadow entries need to store memcg 3654 * references in constrained space, so we maintain an ID space that is 3655 * limited to 16 bit (MEM_CGROUP_ID_MAX), limiting the total number of 3656 * memory-controlled cgroups to 64k. 3657 * 3658 * However, there usually are many references to the offline CSS after 3659 * the cgroup has been destroyed, such as page cache or reclaimable 3660 * slab objects, that don't need to hang on to the ID. 
We want to keep 3661 * those dead CSSes from occupying IDs, or we might quickly exhaust the 3662 * relatively small ID space and prevent the creation of new cgroups 3663 * even when there are far fewer than 64k cgroups - possibly none. 3664 * 3665 * Maintain a private 16-bit ID space for memcg, and allow the ID to 3666 * be freed and recycled when it's no longer needed, which is usually 3667 * when the CSS is offlined. 3668 * 3669 * The only exceptions to that are records of swapped out tmpfs/shmem 3670 * pages that need to be attributed to live ancestors on swapin. But 3671 * those references are manageable from userspace. 3672 */ 3673 3674 #define MEM_CGROUP_ID_MAX ((1UL << MEM_CGROUP_ID_SHIFT) - 1) 3675 static DEFINE_XARRAY_ALLOC1(mem_cgroup_private_ids); 3676 3677 static void mem_cgroup_private_id_remove(struct mem_cgroup *memcg) 3678 { 3679 if (memcg->id.id > 0) { 3680 xa_erase(&mem_cgroup_private_ids, memcg->id.id); 3681 memcg->id.id = 0; 3682 } 3683 } 3684 3685 static inline void mem_cgroup_private_id_put(struct mem_cgroup *memcg, unsigned int n) 3686 { 3687 if (refcount_sub_and_test(n, &memcg->id.ref)) { 3688 mem_cgroup_private_id_remove(memcg); 3689 3690 /* Memcg ID pins CSS */ 3691 css_put(&memcg->css); 3692 } 3693 } 3694 3695 struct mem_cgroup *mem_cgroup_private_id_get_online(struct mem_cgroup *memcg, unsigned int n) 3696 { 3697 while (!refcount_add_not_zero(n, &memcg->id.ref)) { 3698 /* 3699 * The root cgroup cannot be destroyed, so its refcount must 3700 * always be >= 1. 3701 */ 3702 if (WARN_ON_ONCE(mem_cgroup_is_root(memcg))) { 3703 VM_BUG_ON(1); 3704 break; 3705 } 3706 memcg = parent_mem_cgroup(memcg); 3707 if (!memcg) 3708 memcg = root_mem_cgroup; 3709 } 3710 return memcg; 3711 } 3712 3713 /** 3714 * mem_cgroup_from_private_id - look up a memcg from a memcg id 3715 * @id: the memcg id to look up 3716 * 3717 * Caller must hold rcu_read_lock().
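 *
 * One way a caller can pin the result beyond the RCU section (illustrative):
 *
 *	rcu_read_lock();
 *	memcg = mem_cgroup_from_private_id(id);
 *	if (memcg && !css_tryget_online(&memcg->css))
 *		memcg = NULL;
 *	rcu_read_unlock();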
3718 */ 3719 struct mem_cgroup *mem_cgroup_from_private_id(unsigned short id) 3720 { 3721 WARN_ON_ONCE(!rcu_read_lock_held()); 3722 return xa_load(&mem_cgroup_private_ids, id); 3723 } 3724 3725 struct mem_cgroup *mem_cgroup_get_from_id(u64 id) 3726 { 3727 struct cgroup *cgrp; 3728 struct cgroup_subsys_state *css; 3729 struct mem_cgroup *memcg = NULL; 3730 3731 cgrp = cgroup_get_from_id(id); 3732 if (IS_ERR(cgrp)) 3733 return NULL; 3734 3735 css = cgroup_get_e_css(cgrp, &memory_cgrp_subsys); 3736 if (css) 3737 memcg = container_of(css, struct mem_cgroup, css); 3738 3739 cgroup_put(cgrp); 3740 3741 return memcg; 3742 } 3743 3744 static void free_mem_cgroup_per_node_info(struct mem_cgroup_per_node *pn) 3745 { 3746 if (!pn) 3747 return; 3748 3749 free_percpu(pn->lruvec_stats_percpu); 3750 kfree(pn->lruvec_stats); 3751 kfree(pn); 3752 } 3753 3754 static bool alloc_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node) 3755 { 3756 struct mem_cgroup_per_node *pn; 3757 3758 pn = kmem_cache_alloc_node(memcg_pn_cachep, GFP_KERNEL | __GFP_ZERO, 3759 node); 3760 if (!pn) 3761 return false; 3762 3763 pn->lruvec_stats = kzalloc_node(sizeof(struct lruvec_stats), 3764 GFP_KERNEL_ACCOUNT, node); 3765 if (!pn->lruvec_stats) 3766 goto fail; 3767 3768 pn->lruvec_stats_percpu = alloc_percpu_gfp(struct lruvec_stats_percpu, 3769 GFP_KERNEL_ACCOUNT); 3770 if (!pn->lruvec_stats_percpu) 3771 goto fail; 3772 3773 lruvec_init(&pn->lruvec); 3774 pn->memcg = memcg; 3775 3776 memcg->nodeinfo[node] = pn; 3777 return true; 3778 fail: 3779 free_mem_cgroup_per_node_info(pn); 3780 return false; 3781 } 3782 3783 static void __mem_cgroup_free(struct mem_cgroup *memcg) 3784 { 3785 int node; 3786 3787 obj_cgroup_put(memcg->orig_objcg); 3788 3789 for_each_node(node) 3790 free_mem_cgroup_per_node_info(memcg->nodeinfo[node]); 3791 memcg1_free_events(memcg); 3792 kfree(memcg->vmstats); 3793 free_percpu(memcg->vmstats_percpu); 3794 kfree(memcg); 3795 } 3796 3797 static void mem_cgroup_free(struct mem_cgroup *memcg) 3798 { 3799 lru_gen_exit_memcg(memcg); 3800 memcg_wb_domain_exit(memcg); 3801 __mem_cgroup_free(memcg); 3802 } 3803 3804 static struct mem_cgroup *mem_cgroup_alloc(struct mem_cgroup *parent) 3805 { 3806 struct memcg_vmstats_percpu *statc; 3807 struct memcg_vmstats_percpu __percpu *pstatc_pcpu; 3808 struct mem_cgroup *memcg; 3809 int node, cpu; 3810 int __maybe_unused i; 3811 long error; 3812 3813 memcg = kmem_cache_zalloc(memcg_cachep, GFP_KERNEL); 3814 if (!memcg) 3815 return ERR_PTR(-ENOMEM); 3816 3817 error = xa_alloc(&mem_cgroup_private_ids, &memcg->id.id, NULL, 3818 XA_LIMIT(1, MEM_CGROUP_ID_MAX), GFP_KERNEL); 3819 if (error) 3820 goto fail; 3821 error = -ENOMEM; 3822 3823 memcg->vmstats = kzalloc_obj(struct memcg_vmstats, GFP_KERNEL_ACCOUNT); 3824 if (!memcg->vmstats) 3825 goto fail; 3826 3827 memcg->vmstats_percpu = alloc_percpu_gfp(struct memcg_vmstats_percpu, 3828 GFP_KERNEL_ACCOUNT); 3829 if (!memcg->vmstats_percpu) 3830 goto fail; 3831 3832 if (!memcg1_alloc_events(memcg)) 3833 goto fail; 3834 3835 for_each_possible_cpu(cpu) { 3836 if (parent) 3837 pstatc_pcpu = parent->vmstats_percpu; 3838 statc = per_cpu_ptr(memcg->vmstats_percpu, cpu); 3839 statc->parent_pcpu = parent ? 
pstatc_pcpu : NULL; 3840 statc->vmstats = memcg->vmstats; 3841 } 3842 3843 for_each_node(node) 3844 if (!alloc_mem_cgroup_per_node_info(memcg, node)) 3845 goto fail; 3846 3847 if (memcg_wb_domain_init(memcg, GFP_KERNEL)) 3848 goto fail; 3849 3850 INIT_WORK(&memcg->high_work, high_work_func); 3851 vmpressure_init(&memcg->vmpressure); 3852 INIT_LIST_HEAD(&memcg->memory_peaks); 3853 INIT_LIST_HEAD(&memcg->swap_peaks); 3854 spin_lock_init(&memcg->peaks_lock); 3855 memcg->socket_pressure = get_jiffies_64(); 3856 #if BITS_PER_LONG < 64 3857 seqlock_init(&memcg->socket_pressure_seqlock); 3858 #endif 3859 memcg1_memcg_init(memcg); 3860 memcg->kmemcg_id = -1; 3861 INIT_LIST_HEAD(&memcg->objcg_list); 3862 #ifdef CONFIG_CGROUP_WRITEBACK 3863 INIT_LIST_HEAD(&memcg->cgwb_list); 3864 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) 3865 memcg->cgwb_frn[i].done = 3866 __WB_COMPLETION_INIT(&memcg_cgwb_frn_waitq); 3867 #endif 3868 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 3869 spin_lock_init(&memcg->deferred_split_queue.split_queue_lock); 3870 INIT_LIST_HEAD(&memcg->deferred_split_queue.split_queue); 3871 memcg->deferred_split_queue.split_queue_len = 0; 3872 #endif 3873 lru_gen_init_memcg(memcg); 3874 return memcg; 3875 fail: 3876 mem_cgroup_private_id_remove(memcg); 3877 __mem_cgroup_free(memcg); 3878 return ERR_PTR(error); 3879 } 3880 3881 static struct cgroup_subsys_state * __ref 3882 mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css) 3883 { 3884 struct mem_cgroup *parent = mem_cgroup_from_css(parent_css); 3885 struct mem_cgroup *memcg, *old_memcg; 3886 bool memcg_on_dfl = cgroup_subsys_on_dfl(memory_cgrp_subsys); 3887 3888 old_memcg = set_active_memcg(parent); 3889 memcg = mem_cgroup_alloc(parent); 3890 set_active_memcg(old_memcg); 3891 if (IS_ERR(memcg)) 3892 return ERR_CAST(memcg); 3893 3894 page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX); 3895 memcg1_soft_limit_reset(memcg); 3896 #ifdef CONFIG_ZSWAP 3897 memcg->zswap_max = PAGE_COUNTER_MAX; 3898 WRITE_ONCE(memcg->zswap_writeback, true); 3899 #endif 3900 page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX); 3901 if (parent) { 3902 WRITE_ONCE(memcg->swappiness, mem_cgroup_swappiness(parent)); 3903 3904 page_counter_init(&memcg->memory, &parent->memory, memcg_on_dfl); 3905 page_counter_init(&memcg->swap, &parent->swap, false); 3906 #ifdef CONFIG_MEMCG_V1 3907 memcg->memory.track_failcnt = !memcg_on_dfl; 3908 WRITE_ONCE(memcg->oom_kill_disable, READ_ONCE(parent->oom_kill_disable)); 3909 page_counter_init(&memcg->kmem, &parent->kmem, false); 3910 page_counter_init(&memcg->tcpmem, &parent->tcpmem, false); 3911 #endif 3912 } else { 3913 init_memcg_stats(); 3914 init_memcg_events(); 3915 page_counter_init(&memcg->memory, NULL, true); 3916 page_counter_init(&memcg->swap, NULL, false); 3917 #ifdef CONFIG_MEMCG_V1 3918 page_counter_init(&memcg->kmem, NULL, false); 3919 page_counter_init(&memcg->tcpmem, NULL, false); 3920 #endif 3921 root_mem_cgroup = memcg; 3922 return &memcg->css; 3923 } 3924 3925 if (memcg_on_dfl && !cgroup_memory_nosocket) 3926 static_branch_inc(&memcg_sockets_enabled_key); 3927 3928 if (!cgroup_memory_nobpf) 3929 static_branch_inc(&memcg_bpf_enabled_key); 3930 3931 return &memcg->css; 3932 } 3933 3934 static int mem_cgroup_css_online(struct cgroup_subsys_state *css) 3935 { 3936 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 3937 3938 if (memcg_online_kmem(memcg)) 3939 goto remove_id; 3940 3941 /* 3942 * A memcg must be visible for expand_shrinker_info() 3943 * by the time the maps are allocated. 
So, we allocate maps 3944 * here, when for_each_mem_cgroup() can't skip it. 3945 */ 3946 if (alloc_shrinker_info(memcg)) 3947 goto offline_kmem; 3948 3949 if (unlikely(mem_cgroup_is_root(memcg)) && !mem_cgroup_disabled()) 3950 queue_delayed_work(system_dfl_wq, &stats_flush_dwork, 3951 FLUSH_TIME); 3952 lru_gen_online_memcg(memcg); 3953 3954 /* Online state pins memcg ID, memcg ID pins CSS */ 3955 refcount_set(&memcg->id.ref, 1); 3956 css_get(css); 3957 3958 /* 3959 * Ensure mem_cgroup_from_private_id() works once we're fully online. 3960 * 3961 * We could do this earlier and require callers to filter with 3962 * css_tryget_online(). But right now there are no users that 3963 * need earlier access, and the workingset code relies on the 3964 * cgroup tree linkage (mem_cgroup_get_nr_swap_pages()). So 3965 * publish it here at the end of onlining. This matches the 3966 * regular ID destruction during offlining. 3967 */ 3968 xa_store(&mem_cgroup_private_ids, memcg->id.id, memcg, GFP_KERNEL); 3969 3970 return 0; 3971 offline_kmem: 3972 memcg_offline_kmem(memcg); 3973 remove_id: 3974 mem_cgroup_private_id_remove(memcg); 3975 return -ENOMEM; 3976 } 3977 3978 static void mem_cgroup_css_offline(struct cgroup_subsys_state *css) 3979 { 3980 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 3981 3982 memcg1_css_offline(memcg); 3983 3984 page_counter_set_min(&memcg->memory, 0); 3985 page_counter_set_low(&memcg->memory, 0); 3986 3987 zswap_memcg_offline_cleanup(memcg); 3988 3989 memcg_offline_kmem(memcg); 3990 reparent_deferred_split_queue(memcg); 3991 reparent_shrinker_deferred(memcg); 3992 wb_memcg_offline(memcg); 3993 lru_gen_offline_memcg(memcg); 3994 3995 drain_all_stock(memcg); 3996 3997 mem_cgroup_private_id_put(memcg, 1); 3998 } 3999 4000 static void mem_cgroup_css_released(struct cgroup_subsys_state *css) 4001 { 4002 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 4003 4004 invalidate_reclaim_iterators(memcg); 4005 lru_gen_release_memcg(memcg); 4006 } 4007 4008 static void mem_cgroup_css_free(struct cgroup_subsys_state *css) 4009 { 4010 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 4011 int __maybe_unused i; 4012 4013 #ifdef CONFIG_CGROUP_WRITEBACK 4014 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) 4015 wb_wait_for_completion(&memcg->cgwb_frn[i].done); 4016 #endif 4017 if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket) 4018 static_branch_dec(&memcg_sockets_enabled_key); 4019 4020 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg1_tcpmem_active(memcg)) 4021 static_branch_dec(&memcg_sockets_enabled_key); 4022 4023 if (!cgroup_memory_nobpf) 4024 static_branch_dec(&memcg_bpf_enabled_key); 4025 4026 vmpressure_cleanup(&memcg->vmpressure); 4027 cancel_work_sync(&memcg->high_work); 4028 memcg1_remove_from_trees(memcg); 4029 free_shrinker_info(memcg); 4030 mem_cgroup_free(memcg); 4031 } 4032 4033 /** 4034 * mem_cgroup_css_reset - reset the states of a mem_cgroup 4035 * @css: the target css 4036 * 4037 * Reset the states of the mem_cgroup associated with @css. This is 4038 * invoked when the userland requests disabling on the default hierarchy 4039 * but the memcg is pinned through dependency. The memcg should stop 4040 * applying policies and should revert to the vanilla state as it may be 4041 * made visible again. 4042 * 4043 * The current implementation only resets the essential configurations. 4044 * This needs to be expanded to cover all the visible parts. 
4045 */ 4046 static void mem_cgroup_css_reset(struct cgroup_subsys_state *css) 4047 { 4048 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 4049 4050 page_counter_set_max(&memcg->memory, PAGE_COUNTER_MAX); 4051 page_counter_set_max(&memcg->swap, PAGE_COUNTER_MAX); 4052 #ifdef CONFIG_MEMCG_V1 4053 page_counter_set_max(&memcg->kmem, PAGE_COUNTER_MAX); 4054 page_counter_set_max(&memcg->tcpmem, PAGE_COUNTER_MAX); 4055 #endif 4056 page_counter_set_min(&memcg->memory, 0); 4057 page_counter_set_low(&memcg->memory, 0); 4058 page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX); 4059 memcg1_soft_limit_reset(memcg); 4060 page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX); 4061 memcg_wb_domain_size_changed(memcg); 4062 } 4063 4064 struct aggregate_control { 4065 /* pointer to the aggregated (CPU and subtree aggregated) counters */ 4066 long *aggregate; 4067 /* pointer to the non-hierarchichal (CPU aggregated) counters */ 4068 long *local; 4069 /* pointer to the pending child counters during tree propagation */ 4070 long *pending; 4071 /* pointer to the parent's pending counters, could be NULL */ 4072 long *ppending; 4073 /* pointer to the percpu counters to be aggregated */ 4074 long *cstat; 4075 /* pointer to the percpu counters of the last aggregation*/ 4076 long *cstat_prev; 4077 /* size of the above counters */ 4078 int size; 4079 }; 4080 4081 static void mem_cgroup_stat_aggregate(struct aggregate_control *ac) 4082 { 4083 int i; 4084 long delta, delta_cpu, v; 4085 4086 for (i = 0; i < ac->size; i++) { 4087 /* 4088 * Collect the aggregated propagation counts of groups 4089 * below us. We're in a per-cpu loop here and this is 4090 * a global counter, so the first cycle will get them. 4091 */ 4092 delta = ac->pending[i]; 4093 if (delta) 4094 ac->pending[i] = 0; 4095 4096 /* Add CPU changes on this level since the last flush */ 4097 delta_cpu = 0; 4098 v = READ_ONCE(ac->cstat[i]); 4099 if (v != ac->cstat_prev[i]) { 4100 delta_cpu = v - ac->cstat_prev[i]; 4101 delta += delta_cpu; 4102 ac->cstat_prev[i] = v; 4103 } 4104 4105 /* Aggregate counts on this level and propagate upwards */ 4106 if (delta_cpu) 4107 ac->local[i] += delta_cpu; 4108 4109 if (delta) { 4110 ac->aggregate[i] += delta; 4111 if (ac->ppending) 4112 ac->ppending[i] += delta; 4113 } 4114 } 4115 } 4116 4117 #ifdef CONFIG_MEMCG_NMI_SAFETY_REQUIRES_ATOMIC 4118 static void flush_nmi_stats(struct mem_cgroup *memcg, struct mem_cgroup *parent, 4119 int cpu) 4120 { 4121 int nid; 4122 4123 if (atomic_read(&memcg->kmem_stat)) { 4124 int kmem = atomic_xchg(&memcg->kmem_stat, 0); 4125 int index = memcg_stats_index(MEMCG_KMEM); 4126 4127 memcg->vmstats->state[index] += kmem; 4128 if (parent) 4129 parent->vmstats->state_pending[index] += kmem; 4130 } 4131 4132 for_each_node_state(nid, N_MEMORY) { 4133 struct mem_cgroup_per_node *pn = memcg->nodeinfo[nid]; 4134 struct lruvec_stats *lstats = pn->lruvec_stats; 4135 struct lruvec_stats *plstats = NULL; 4136 4137 if (parent) 4138 plstats = parent->nodeinfo[nid]->lruvec_stats; 4139 4140 if (atomic_read(&pn->slab_reclaimable)) { 4141 int slab = atomic_xchg(&pn->slab_reclaimable, 0); 4142 int index = memcg_stats_index(NR_SLAB_RECLAIMABLE_B); 4143 4144 lstats->state[index] += slab; 4145 if (plstats) 4146 plstats->state_pending[index] += slab; 4147 } 4148 if (atomic_read(&pn->slab_unreclaimable)) { 4149 int slab = atomic_xchg(&pn->slab_unreclaimable, 0); 4150 int index = memcg_stats_index(NR_SLAB_UNRECLAIMABLE_B); 4151 4152 lstats->state[index] += slab; 4153 if (plstats) 4154 
plstats->state_pending[index] += slab; 4155 } 4156 } 4157 } 4158 #else 4159 static void flush_nmi_stats(struct mem_cgroup *memcg, struct mem_cgroup *parent, 4160 int cpu) 4161 {} 4162 #endif 4163 4164 static void mem_cgroup_css_rstat_flush(struct cgroup_subsys_state *css, int cpu) 4165 { 4166 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 4167 struct mem_cgroup *parent = parent_mem_cgroup(memcg); 4168 struct memcg_vmstats_percpu *statc; 4169 struct aggregate_control ac; 4170 int nid; 4171 4172 flush_nmi_stats(memcg, parent, cpu); 4173 4174 statc = per_cpu_ptr(memcg->vmstats_percpu, cpu); 4175 4176 ac = (struct aggregate_control) { 4177 .aggregate = memcg->vmstats->state, 4178 .local = memcg->vmstats->state_local, 4179 .pending = memcg->vmstats->state_pending, 4180 .ppending = parent ? parent->vmstats->state_pending : NULL, 4181 .cstat = statc->state, 4182 .cstat_prev = statc->state_prev, 4183 .size = MEMCG_VMSTAT_SIZE, 4184 }; 4185 mem_cgroup_stat_aggregate(&ac); 4186 4187 ac = (struct aggregate_control) { 4188 .aggregate = memcg->vmstats->events, 4189 .local = memcg->vmstats->events_local, 4190 .pending = memcg->vmstats->events_pending, 4191 .ppending = parent ? parent->vmstats->events_pending : NULL, 4192 .cstat = statc->events, 4193 .cstat_prev = statc->events_prev, 4194 .size = NR_MEMCG_EVENTS, 4195 }; 4196 mem_cgroup_stat_aggregate(&ac); 4197 4198 for_each_node_state(nid, N_MEMORY) { 4199 struct mem_cgroup_per_node *pn = memcg->nodeinfo[nid]; 4200 struct lruvec_stats *lstats = pn->lruvec_stats; 4201 struct lruvec_stats *plstats = NULL; 4202 struct lruvec_stats_percpu *lstatc; 4203 4204 if (parent) 4205 plstats = parent->nodeinfo[nid]->lruvec_stats; 4206 4207 lstatc = per_cpu_ptr(pn->lruvec_stats_percpu, cpu); 4208 4209 ac = (struct aggregate_control) { 4210 .aggregate = lstats->state, 4211 .local = lstats->state_local, 4212 .pending = lstats->state_pending, 4213 .ppending = plstats ? plstats->state_pending : NULL, 4214 .cstat = lstatc->state, 4215 .cstat_prev = lstatc->state_prev, 4216 .size = NR_MEMCG_NODE_STAT_ITEMS, 4217 }; 4218 mem_cgroup_stat_aggregate(&ac); 4219 4220 } 4221 WRITE_ONCE(statc->stats_updates, 0); 4222 /* We are in a per-cpu loop here, only do the atomic write once */ 4223 if (atomic_read(&memcg->vmstats->stats_updates)) 4224 atomic_set(&memcg->vmstats->stats_updates, 0); 4225 } 4226 4227 static void mem_cgroup_fork(struct task_struct *task) 4228 { 4229 /* 4230 * Set the update flag to cause task->objcg to be initialized lazily 4231 * on the first allocation. It can be done without any synchronization 4232 * because it's always performed on the current task, so does 4233 * current_objcg_update(). 4234 */ 4235 task->objcg = (struct obj_cgroup *)CURRENT_OBJCG_UPDATE_FLAG; 4236 } 4237 4238 static void mem_cgroup_exit(struct task_struct *task) 4239 { 4240 struct obj_cgroup *objcg = task->objcg; 4241 4242 objcg = (struct obj_cgroup *) 4243 ((unsigned long)objcg & ~CURRENT_OBJCG_UPDATE_FLAG); 4244 obj_cgroup_put(objcg); 4245 4246 /* 4247 * Some kernel allocations can happen after this point, 4248 * but let's ignore them. It can be done without any synchronization 4249 * because it's always performed on the current task, so does 4250 * current_objcg_update(). 
4251 */ 4252 task->objcg = NULL; 4253 } 4254 4255 #ifdef CONFIG_LRU_GEN 4256 static void mem_cgroup_lru_gen_attach(struct cgroup_taskset *tset) 4257 { 4258 struct task_struct *task; 4259 struct cgroup_subsys_state *css; 4260 4261 /* find the first leader if there is any */ 4262 cgroup_taskset_for_each_leader(task, css, tset) 4263 break; 4264 4265 if (!task) 4266 return; 4267 4268 task_lock(task); 4269 if (task->mm && READ_ONCE(task->mm->owner) == task) 4270 lru_gen_migrate_mm(task->mm); 4271 task_unlock(task); 4272 } 4273 #else 4274 static void mem_cgroup_lru_gen_attach(struct cgroup_taskset *tset) {} 4275 #endif /* CONFIG_LRU_GEN */ 4276 4277 static void mem_cgroup_kmem_attach(struct cgroup_taskset *tset) 4278 { 4279 struct task_struct *task; 4280 struct cgroup_subsys_state *css; 4281 4282 cgroup_taskset_for_each(task, css, tset) { 4283 /* atomically set the update bit */ 4284 set_bit(CURRENT_OBJCG_UPDATE_BIT, (unsigned long *)&task->objcg); 4285 } 4286 } 4287 4288 static void mem_cgroup_attach(struct cgroup_taskset *tset) 4289 { 4290 mem_cgroup_lru_gen_attach(tset); 4291 mem_cgroup_kmem_attach(tset); 4292 } 4293 4294 static int seq_puts_memcg_tunable(struct seq_file *m, unsigned long value) 4295 { 4296 if (value == PAGE_COUNTER_MAX) 4297 seq_puts(m, "max\n"); 4298 else 4299 seq_printf(m, "%llu\n", (u64)value * PAGE_SIZE); 4300 4301 return 0; 4302 } 4303 4304 static u64 memory_current_read(struct cgroup_subsys_state *css, 4305 struct cftype *cft) 4306 { 4307 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 4308 4309 return (u64)page_counter_read(&memcg->memory) * PAGE_SIZE; 4310 } 4311 4312 #define OFP_PEAK_UNSET (((-1UL))) 4313 4314 static int peak_show(struct seq_file *sf, void *v, struct page_counter *pc) 4315 { 4316 struct cgroup_of_peak *ofp = of_peak(sf->private); 4317 u64 fd_peak = READ_ONCE(ofp->value), peak; 4318 4319 /* User wants global or local peak? 
*/ 4320 if (fd_peak == OFP_PEAK_UNSET) 4321 peak = pc->watermark; 4322 else 4323 peak = max(fd_peak, READ_ONCE(pc->local_watermark)); 4324 4325 seq_printf(sf, "%llu\n", peak * PAGE_SIZE); 4326 return 0; 4327 } 4328 4329 static int memory_peak_show(struct seq_file *sf, void *v) 4330 { 4331 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(sf)); 4332 4333 return peak_show(sf, v, &memcg->memory); 4334 } 4335 4336 static int peak_open(struct kernfs_open_file *of) 4337 { 4338 struct cgroup_of_peak *ofp = of_peak(of); 4339 4340 ofp->value = OFP_PEAK_UNSET; 4341 return 0; 4342 } 4343 4344 static void peak_release(struct kernfs_open_file *of) 4345 { 4346 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 4347 struct cgroup_of_peak *ofp = of_peak(of); 4348 4349 if (ofp->value == OFP_PEAK_UNSET) { 4350 /* fast path (no writes on this fd) */ 4351 return; 4352 } 4353 spin_lock(&memcg->peaks_lock); 4354 list_del(&ofp->list); 4355 spin_unlock(&memcg->peaks_lock); 4356 } 4357 4358 static ssize_t peak_write(struct kernfs_open_file *of, char *buf, size_t nbytes, 4359 loff_t off, struct page_counter *pc, 4360 struct list_head *watchers) 4361 { 4362 unsigned long usage; 4363 struct cgroup_of_peak *peer_ctx; 4364 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 4365 struct cgroup_of_peak *ofp = of_peak(of); 4366 4367 spin_lock(&memcg->peaks_lock); 4368 4369 usage = page_counter_read(pc); 4370 WRITE_ONCE(pc->local_watermark, usage); 4371 4372 list_for_each_entry(peer_ctx, watchers, list) 4373 if (usage > peer_ctx->value) 4374 WRITE_ONCE(peer_ctx->value, usage); 4375 4376 /* initial write, register watcher */ 4377 if (ofp->value == OFP_PEAK_UNSET) 4378 list_add(&ofp->list, watchers); 4379 4380 WRITE_ONCE(ofp->value, usage); 4381 spin_unlock(&memcg->peaks_lock); 4382 4383 return nbytes; 4384 } 4385 4386 static ssize_t memory_peak_write(struct kernfs_open_file *of, char *buf, 4387 size_t nbytes, loff_t off) 4388 { 4389 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 4390 4391 return peak_write(of, buf, nbytes, off, &memcg->memory, 4392 &memcg->memory_peaks); 4393 } 4394 4395 #undef OFP_PEAK_UNSET 4396 4397 static int memory_min_show(struct seq_file *m, void *v) 4398 { 4399 return seq_puts_memcg_tunable(m, 4400 READ_ONCE(mem_cgroup_from_seq(m)->memory.min)); 4401 } 4402 4403 static ssize_t memory_min_write(struct kernfs_open_file *of, 4404 char *buf, size_t nbytes, loff_t off) 4405 { 4406 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 4407 unsigned long min; 4408 int err; 4409 4410 buf = strstrip(buf); 4411 err = page_counter_memparse(buf, "max", &min); 4412 if (err) 4413 return err; 4414 4415 page_counter_set_min(&memcg->memory, min); 4416 4417 return nbytes; 4418 } 4419 4420 static int memory_low_show(struct seq_file *m, void *v) 4421 { 4422 return seq_puts_memcg_tunable(m, 4423 READ_ONCE(mem_cgroup_from_seq(m)->memory.low)); 4424 } 4425 4426 static ssize_t memory_low_write(struct kernfs_open_file *of, 4427 char *buf, size_t nbytes, loff_t off) 4428 { 4429 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 4430 unsigned long low; 4431 int err; 4432 4433 buf = strstrip(buf); 4434 err = page_counter_memparse(buf, "max", &low); 4435 if (err) 4436 return err; 4437 4438 page_counter_set_low(&memcg->memory, low); 4439 4440 return nbytes; 4441 } 4442 4443 static int memory_high_show(struct seq_file *m, void *v) 4444 { 4445 return seq_puts_memcg_tunable(m, 4446 READ_ONCE(mem_cgroup_from_seq(m)->memory.high)); 4447 } 4448 4449 static ssize_t 
memory_high_write(struct kernfs_open_file *of, 4450 char *buf, size_t nbytes, loff_t off) 4451 { 4452 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 4453 unsigned int nr_retries = MAX_RECLAIM_RETRIES; 4454 bool drained = false; 4455 unsigned long high; 4456 int err; 4457 4458 buf = strstrip(buf); 4459 err = page_counter_memparse(buf, "max", &high); 4460 if (err) 4461 return err; 4462 4463 page_counter_set_high(&memcg->memory, high); 4464 4465 if (of->file->f_flags & O_NONBLOCK) 4466 goto out; 4467 4468 for (;;) { 4469 unsigned long nr_pages = page_counter_read(&memcg->memory); 4470 unsigned long reclaimed; 4471 4472 if (nr_pages <= high) 4473 break; 4474 4475 if (signal_pending(current)) 4476 break; 4477 4478 if (!drained) { 4479 drain_all_stock(memcg); 4480 drained = true; 4481 continue; 4482 } 4483 4484 reclaimed = try_to_free_mem_cgroup_pages(memcg, nr_pages - high, 4485 GFP_KERNEL, MEMCG_RECLAIM_MAY_SWAP, NULL); 4486 4487 if (!reclaimed && !nr_retries--) 4488 break; 4489 } 4490 out: 4491 memcg_wb_domain_size_changed(memcg); 4492 return nbytes; 4493 } 4494 4495 static int memory_max_show(struct seq_file *m, void *v) 4496 { 4497 return seq_puts_memcg_tunable(m, 4498 READ_ONCE(mem_cgroup_from_seq(m)->memory.max)); 4499 } 4500 4501 static ssize_t memory_max_write(struct kernfs_open_file *of, 4502 char *buf, size_t nbytes, loff_t off) 4503 { 4504 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 4505 unsigned int nr_reclaims = MAX_RECLAIM_RETRIES; 4506 bool drained = false; 4507 unsigned long max; 4508 int err; 4509 4510 buf = strstrip(buf); 4511 err = page_counter_memparse(buf, "max", &max); 4512 if (err) 4513 return err; 4514 4515 xchg(&memcg->memory.max, max); 4516 4517 if (of->file->f_flags & O_NONBLOCK) 4518 goto out; 4519 4520 for (;;) { 4521 unsigned long nr_pages = page_counter_read(&memcg->memory); 4522 4523 if (nr_pages <= max) 4524 break; 4525 4526 if (signal_pending(current)) 4527 break; 4528 4529 if (!drained) { 4530 drain_all_stock(memcg); 4531 drained = true; 4532 continue; 4533 } 4534 4535 if (nr_reclaims) { 4536 if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max, 4537 GFP_KERNEL, MEMCG_RECLAIM_MAY_SWAP, NULL)) 4538 nr_reclaims--; 4539 continue; 4540 } 4541 4542 memcg_memory_event(memcg, MEMCG_OOM); 4543 if (!mem_cgroup_out_of_memory(memcg, GFP_KERNEL, 0)) 4544 break; 4545 cond_resched(); 4546 } 4547 out: 4548 memcg_wb_domain_size_changed(memcg); 4549 return nbytes; 4550 } 4551 4552 /* 4553 * Note: don't forget to update the 'samples/cgroup/memcg_event_listener' 4554 * if any new events become available. 
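 *
 * The events land in memory.events (and memory.events.local), a flat
 * keyed file with one "<event> <count>" pair per line, matching the
 * keys printed below; the counts here are illustrative only:
 *
 *	low 0
 *	high 4112
 *	max 17
 *	oom 2
 *	oom_kill 1
 *	oom_group_kill 0
 *	sock_throttled 0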
4555 */ 4556 static void __memory_events_show(struct seq_file *m, atomic_long_t *events) 4557 { 4558 seq_printf(m, "low %lu\n", atomic_long_read(&events[MEMCG_LOW])); 4559 seq_printf(m, "high %lu\n", atomic_long_read(&events[MEMCG_HIGH])); 4560 seq_printf(m, "max %lu\n", atomic_long_read(&events[MEMCG_MAX])); 4561 seq_printf(m, "oom %lu\n", atomic_long_read(&events[MEMCG_OOM])); 4562 seq_printf(m, "oom_kill %lu\n", 4563 atomic_long_read(&events[MEMCG_OOM_KILL])); 4564 seq_printf(m, "oom_group_kill %lu\n", 4565 atomic_long_read(&events[MEMCG_OOM_GROUP_KILL])); 4566 seq_printf(m, "sock_throttled %lu\n", 4567 atomic_long_read(&events[MEMCG_SOCK_THROTTLED])); 4568 } 4569 4570 static int memory_events_show(struct seq_file *m, void *v) 4571 { 4572 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 4573 4574 __memory_events_show(m, memcg->memory_events); 4575 return 0; 4576 } 4577 4578 static int memory_events_local_show(struct seq_file *m, void *v) 4579 { 4580 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 4581 4582 __memory_events_show(m, memcg->memory_events_local); 4583 return 0; 4584 } 4585 4586 int memory_stat_show(struct seq_file *m, void *v) 4587 { 4588 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 4589 char *buf = kmalloc(SEQ_BUF_SIZE, GFP_KERNEL); 4590 struct seq_buf s; 4591 4592 if (!buf) 4593 return -ENOMEM; 4594 seq_buf_init(&s, buf, SEQ_BUF_SIZE); 4595 memory_stat_format(memcg, &s); 4596 seq_puts(m, buf); 4597 kfree(buf); 4598 return 0; 4599 } 4600 4601 #ifdef CONFIG_NUMA 4602 static inline unsigned long lruvec_page_state_output(struct lruvec *lruvec, 4603 int item) 4604 { 4605 return lruvec_page_state(lruvec, item) * 4606 memcg_page_state_output_unit(item); 4607 } 4608 4609 static int memory_numa_stat_show(struct seq_file *m, void *v) 4610 { 4611 int i; 4612 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 4613 4614 mem_cgroup_flush_stats(memcg); 4615 4616 for (i = 0; i < ARRAY_SIZE(memory_stats); i++) { 4617 int nid; 4618 4619 if (memory_stats[i].idx >= NR_VM_NODE_STAT_ITEMS) 4620 continue; 4621 4622 seq_printf(m, "%s", memory_stats[i].name); 4623 for_each_node_state(nid, N_MEMORY) { 4624 u64 size; 4625 struct lruvec *lruvec; 4626 4627 lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid)); 4628 size = lruvec_page_state_output(lruvec, 4629 memory_stats[i].idx); 4630 seq_printf(m, " N%d=%llu", nid, size); 4631 } 4632 seq_putc(m, '\n'); 4633 } 4634 4635 return 0; 4636 } 4637 #endif 4638 4639 static int memory_oom_group_show(struct seq_file *m, void *v) 4640 { 4641 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 4642 4643 seq_printf(m, "%d\n", READ_ONCE(memcg->oom_group)); 4644 4645 return 0; 4646 } 4647 4648 static ssize_t memory_oom_group_write(struct kernfs_open_file *of, 4649 char *buf, size_t nbytes, loff_t off) 4650 { 4651 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 4652 int ret, oom_group; 4653 4654 buf = strstrip(buf); 4655 if (!buf) 4656 return -EINVAL; 4657 4658 ret = kstrtoint(buf, 0, &oom_group); 4659 if (ret) 4660 return ret; 4661 4662 if (oom_group != 0 && oom_group != 1) 4663 return -EINVAL; 4664 4665 WRITE_ONCE(memcg->oom_group, oom_group); 4666 4667 return nbytes; 4668 } 4669 4670 static ssize_t memory_reclaim(struct kernfs_open_file *of, char *buf, 4671 size_t nbytes, loff_t off) 4672 { 4673 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 4674 int ret; 4675 4676 ret = user_proactive_reclaim(buf, memcg, NULL); 4677 if (ret) 4678 return ret; 4679 4680 return nbytes; 4681 } 4682 4683 static struct cftype memory_files[] = { 4684 { 
4685 .name = "current", 4686 .flags = CFTYPE_NOT_ON_ROOT, 4687 .read_u64 = memory_current_read, 4688 }, 4689 { 4690 .name = "peak", 4691 .flags = CFTYPE_NOT_ON_ROOT, 4692 .open = peak_open, 4693 .release = peak_release, 4694 .seq_show = memory_peak_show, 4695 .write = memory_peak_write, 4696 }, 4697 { 4698 .name = "min", 4699 .flags = CFTYPE_NOT_ON_ROOT, 4700 .seq_show = memory_min_show, 4701 .write = memory_min_write, 4702 }, 4703 { 4704 .name = "low", 4705 .flags = CFTYPE_NOT_ON_ROOT, 4706 .seq_show = memory_low_show, 4707 .write = memory_low_write, 4708 }, 4709 { 4710 .name = "high", 4711 .flags = CFTYPE_NOT_ON_ROOT, 4712 .seq_show = memory_high_show, 4713 .write = memory_high_write, 4714 }, 4715 { 4716 .name = "max", 4717 .flags = CFTYPE_NOT_ON_ROOT, 4718 .seq_show = memory_max_show, 4719 .write = memory_max_write, 4720 }, 4721 { 4722 .name = "events", 4723 .flags = CFTYPE_NOT_ON_ROOT, 4724 .file_offset = offsetof(struct mem_cgroup, events_file), 4725 .seq_show = memory_events_show, 4726 }, 4727 { 4728 .name = "events.local", 4729 .flags = CFTYPE_NOT_ON_ROOT, 4730 .file_offset = offsetof(struct mem_cgroup, events_local_file), 4731 .seq_show = memory_events_local_show, 4732 }, 4733 { 4734 .name = "stat", 4735 .seq_show = memory_stat_show, 4736 }, 4737 #ifdef CONFIG_NUMA 4738 { 4739 .name = "numa_stat", 4740 .seq_show = memory_numa_stat_show, 4741 }, 4742 #endif 4743 { 4744 .name = "oom.group", 4745 .flags = CFTYPE_NOT_ON_ROOT | CFTYPE_NS_DELEGATABLE, 4746 .seq_show = memory_oom_group_show, 4747 .write = memory_oom_group_write, 4748 }, 4749 { 4750 .name = "reclaim", 4751 .flags = CFTYPE_NS_DELEGATABLE, 4752 .write = memory_reclaim, 4753 }, 4754 { } /* terminate */ 4755 }; 4756 4757 struct cgroup_subsys memory_cgrp_subsys = { 4758 .css_alloc = mem_cgroup_css_alloc, 4759 .css_online = mem_cgroup_css_online, 4760 .css_offline = mem_cgroup_css_offline, 4761 .css_released = mem_cgroup_css_released, 4762 .css_free = mem_cgroup_css_free, 4763 .css_reset = mem_cgroup_css_reset, 4764 .css_rstat_flush = mem_cgroup_css_rstat_flush, 4765 .attach = mem_cgroup_attach, 4766 .fork = mem_cgroup_fork, 4767 .exit = mem_cgroup_exit, 4768 .dfl_cftypes = memory_files, 4769 #ifdef CONFIG_MEMCG_V1 4770 .legacy_cftypes = mem_cgroup_legacy_files, 4771 #endif 4772 .early_init = 0, 4773 }; 4774 4775 /** 4776 * mem_cgroup_calculate_protection - check if memory consumption is in the normal range 4777 * @root: the top ancestor of the sub-tree being checked 4778 * @memcg: the memory cgroup to check 4779 * 4780 * WARNING: This function is not stateless! It can only be used as part 4781 * of a top-down tree iteration, not for isolated queries. 
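 *
 * Usage sketch, loosely modelled on the reclaim walk in
 * shrink_node_memcgs() (simplified; "pgdat" and "sc" stand in for the
 * caller's node and scan_control there):
 *
 *	memcg = mem_cgroup_iter(root, NULL, NULL);
 *	do {
 *		mem_cgroup_calculate_protection(root, memcg);
 *		if (mem_cgroup_below_min(root, memcg))
 *			continue;
 *		shrink_lruvec(mem_cgroup_lruvec(memcg, pgdat), sc);
 *	} while ((memcg = mem_cgroup_iter(root, memcg, NULL)));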
4782 */ 4783 void mem_cgroup_calculate_protection(struct mem_cgroup *root, 4784 struct mem_cgroup *memcg) 4785 { 4786 bool recursive_protection = 4787 cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_RECURSIVE_PROT; 4788 4789 if (mem_cgroup_disabled()) 4790 return; 4791 4792 if (!root) 4793 root = root_mem_cgroup; 4794 4795 page_counter_calculate_protection(&root->memory, &memcg->memory, recursive_protection); 4796 } 4797 4798 static int charge_memcg(struct folio *folio, struct mem_cgroup *memcg, 4799 gfp_t gfp) 4800 { 4801 int ret; 4802 4803 ret = try_charge(memcg, gfp, folio_nr_pages(folio)); 4804 if (ret) 4805 goto out; 4806 4807 css_get(&memcg->css); 4808 commit_charge(folio, memcg); 4809 memcg1_commit_charge(folio, memcg); 4810 out: 4811 return ret; 4812 } 4813 4814 int __mem_cgroup_charge(struct folio *folio, struct mm_struct *mm, gfp_t gfp) 4815 { 4816 struct mem_cgroup *memcg; 4817 int ret; 4818 4819 memcg = get_mem_cgroup_from_mm(mm); 4820 ret = charge_memcg(folio, memcg, gfp); 4821 css_put(&memcg->css); 4822 4823 return ret; 4824 } 4825 4826 /** 4827 * mem_cgroup_charge_hugetlb - charge the memcg for a hugetlb folio 4828 * @folio: folio being charged 4829 * @gfp: reclaim mode 4830 * 4831 * This function is called when allocating a huge page folio, after the page has 4832 * already been obtained and charged to the appropriate hugetlb cgroup 4833 * controller (if it is enabled). 4834 * 4835 * Returns -ENOMEM if the memcg is already full. 4836 * Returns 0 if either the charge was successful, or if we skip the charging. 4837 */ 4838 int mem_cgroup_charge_hugetlb(struct folio *folio, gfp_t gfp) 4839 { 4840 struct mem_cgroup *memcg = get_mem_cgroup_from_current(); 4841 int ret = 0; 4842 4843 /* 4844 * Even if memcg does not account for hugetlb, we still want to update 4845 * system-level stats via lruvec_stat_mod_folio. Return 0, and skip 4846 * charging the memcg. 4847 */ 4848 if (mem_cgroup_disabled() || !memcg_accounts_hugetlb() || 4849 !memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys)) 4850 goto out; 4851 4852 if (charge_memcg(folio, memcg, gfp)) 4853 ret = -ENOMEM; 4854 4855 out: 4856 mem_cgroup_put(memcg); 4857 return ret; 4858 } 4859 4860 /** 4861 * mem_cgroup_swapin_charge_folio - Charge a newly allocated folio for swapin. 4862 * @folio: folio to charge. 4863 * @mm: mm context of the victim 4864 * @gfp: reclaim mode 4865 * @entry: swap entry for which the folio is allocated 4866 * 4867 * This function charges a folio allocated for swapin. Please call this before 4868 * adding the folio to the swapcache. 4869 * 4870 * Returns 0 on success. Otherwise, an error code is returned.
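 *
 * Call-order sketch on the swapin side (loosely modelled on
 * __read_swap_cache_async(); allocation details and error handling
 * simplified):
 *
 *	folio = folio_alloc(gfp, 0);
 *	if (!folio)
 *		return NULL;
 *	if (mem_cgroup_swapin_charge_folio(folio, mm, gfp, entry)) {
 *		folio_put(folio);
 *		return NULL;
 *	}
 *	add_to_swap_cache(folio, entry, gfp, NULL);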
4871 */ 4872 int mem_cgroup_swapin_charge_folio(struct folio *folio, struct mm_struct *mm, 4873 gfp_t gfp, swp_entry_t entry) 4874 { 4875 struct mem_cgroup *memcg; 4876 unsigned short id; 4877 int ret; 4878 4879 if (mem_cgroup_disabled()) 4880 return 0; 4881 4882 id = lookup_swap_cgroup_id(entry); 4883 rcu_read_lock(); 4884 memcg = mem_cgroup_from_private_id(id); 4885 if (!memcg || !css_tryget_online(&memcg->css)) 4886 memcg = get_mem_cgroup_from_mm(mm); 4887 rcu_read_unlock(); 4888 4889 ret = charge_memcg(folio, memcg, gfp); 4890 4891 css_put(&memcg->css); 4892 return ret; 4893 } 4894 4895 struct uncharge_gather { 4896 struct mem_cgroup *memcg; 4897 unsigned long nr_memory; 4898 unsigned long pgpgout; 4899 unsigned long nr_kmem; 4900 int nid; 4901 }; 4902 4903 static inline void uncharge_gather_clear(struct uncharge_gather *ug) 4904 { 4905 memset(ug, 0, sizeof(*ug)); 4906 } 4907 4908 static void uncharge_batch(const struct uncharge_gather *ug) 4909 { 4910 if (ug->nr_memory) { 4911 memcg_uncharge(ug->memcg, ug->nr_memory); 4912 if (ug->nr_kmem) { 4913 mod_memcg_state(ug->memcg, MEMCG_KMEM, -ug->nr_kmem); 4914 memcg1_account_kmem(ug->memcg, -ug->nr_kmem); 4915 } 4916 memcg1_oom_recover(ug->memcg); 4917 } 4918 4919 memcg1_uncharge_batch(ug->memcg, ug->pgpgout, ug->nr_memory, ug->nid); 4920 4921 /* drop reference from uncharge_folio */ 4922 css_put(&ug->memcg->css); 4923 } 4924 4925 static void uncharge_folio(struct folio *folio, struct uncharge_gather *ug) 4926 { 4927 long nr_pages; 4928 struct mem_cgroup *memcg; 4929 struct obj_cgroup *objcg; 4930 4931 VM_BUG_ON_FOLIO(folio_test_lru(folio), folio); 4932 4933 /* 4934 * Nobody should be changing or seriously looking at 4935 * folio memcg or objcg at this point, we have fully 4936 * exclusive access to the folio. 4937 */ 4938 if (folio_memcg_kmem(folio)) { 4939 objcg = __folio_objcg(folio); 4940 /* 4941 * This get matches the put at the end of the function and 4942 * kmem pages do not hold memcg references anymore. 
4943 */ 4944 memcg = get_mem_cgroup_from_objcg(objcg); 4945 } else { 4946 memcg = __folio_memcg(folio); 4947 } 4948 4949 if (!memcg) 4950 return; 4951 4952 if (ug->memcg != memcg) { 4953 if (ug->memcg) { 4954 uncharge_batch(ug); 4955 uncharge_gather_clear(ug); 4956 } 4957 ug->memcg = memcg; 4958 ug->nid = folio_nid(folio); 4959 4960 /* pairs with css_put in uncharge_batch */ 4961 css_get(&memcg->css); 4962 } 4963 4964 nr_pages = folio_nr_pages(folio); 4965 4966 if (folio_memcg_kmem(folio)) { 4967 ug->nr_memory += nr_pages; 4968 ug->nr_kmem += nr_pages; 4969 4970 folio->memcg_data = 0; 4971 obj_cgroup_put(objcg); 4972 } else { 4973 /* LRU pages aren't accounted at the root level */ 4974 if (!mem_cgroup_is_root(memcg)) 4975 ug->nr_memory += nr_pages; 4976 ug->pgpgout++; 4977 4978 WARN_ON_ONCE(folio_unqueue_deferred_split(folio)); 4979 folio->memcg_data = 0; 4980 } 4981 4982 css_put(&memcg->css); 4983 } 4984 4985 void __mem_cgroup_uncharge(struct folio *folio) 4986 { 4987 struct uncharge_gather ug; 4988 4989 /* Don't touch folio->lru of any random page, pre-check: */ 4990 if (!folio_memcg_charged(folio)) 4991 return; 4992 4993 uncharge_gather_clear(&ug); 4994 uncharge_folio(folio, &ug); 4995 uncharge_batch(&ug); 4996 } 4997 4998 void __mem_cgroup_uncharge_folios(struct folio_batch *folios) 4999 { 5000 struct uncharge_gather ug; 5001 unsigned int i; 5002 5003 uncharge_gather_clear(&ug); 5004 for (i = 0; i < folios->nr; i++) 5005 uncharge_folio(folios->folios[i], &ug); 5006 if (ug.memcg) 5007 uncharge_batch(&ug); 5008 } 5009 5010 /** 5011 * mem_cgroup_replace_folio - Charge a folio's replacement. 5012 * @old: Currently circulating folio. 5013 * @new: Replacement folio. 5014 * 5015 * Charge @new as a replacement folio for @old. @old will 5016 * be uncharged upon free. 5017 * 5018 * Both folios must be locked, @new->mapping must be set up. 5019 */ 5020 void mem_cgroup_replace_folio(struct folio *old, struct folio *new) 5021 { 5022 struct mem_cgroup *memcg; 5023 long nr_pages = folio_nr_pages(new); 5024 5025 VM_BUG_ON_FOLIO(!folio_test_locked(old), old); 5026 VM_BUG_ON_FOLIO(!folio_test_locked(new), new); 5027 VM_BUG_ON_FOLIO(folio_test_anon(old) != folio_test_anon(new), new); 5028 VM_BUG_ON_FOLIO(folio_nr_pages(old) != nr_pages, new); 5029 5030 if (mem_cgroup_disabled()) 5031 return; 5032 5033 /* Page cache replacement: new folio already charged? */ 5034 if (folio_memcg_charged(new)) 5035 return; 5036 5037 memcg = folio_memcg(old); 5038 VM_WARN_ON_ONCE_FOLIO(!memcg, old); 5039 if (!memcg) 5040 return; 5041 5042 /* Force-charge the new page. The old one will be freed soon */ 5043 if (!mem_cgroup_is_root(memcg)) { 5044 page_counter_charge(&memcg->memory, nr_pages); 5045 if (do_memsw_account()) 5046 page_counter_charge(&memcg->memsw, nr_pages); 5047 } 5048 5049 css_get(&memcg->css); 5050 commit_charge(new, memcg); 5051 memcg1_commit_charge(new, memcg); 5052 } 5053 5054 /** 5055 * mem_cgroup_migrate - Transfer the memcg data from the old to the new folio. 5056 * @old: Currently circulating folio. 5057 * @new: Replacement folio. 5058 * 5059 * Transfer the memcg data from the old folio to the new folio for migration. 5060 * The old folio's data info will be cleared. Note that the memory counters 5061 * will remain unchanged throughout the process. 5062 * 5063 * Both folios must be locked, @new->mapping must be set up. 
5064 */ 5065 void mem_cgroup_migrate(struct folio *old, struct folio *new) 5066 { 5067 struct mem_cgroup *memcg; 5068 5069 VM_BUG_ON_FOLIO(!folio_test_locked(old), old); 5070 VM_BUG_ON_FOLIO(!folio_test_locked(new), new); 5071 VM_BUG_ON_FOLIO(folio_test_anon(old) != folio_test_anon(new), new); 5072 VM_BUG_ON_FOLIO(folio_nr_pages(old) != folio_nr_pages(new), new); 5073 VM_BUG_ON_FOLIO(folio_test_lru(old), old); 5074 5075 if (mem_cgroup_disabled()) 5076 return; 5077 5078 memcg = folio_memcg(old); 5079 /* 5080 * Note that it is normal to see !memcg for a hugetlb folio. 5081 * For example, it could have been allocated when memory_hugetlb_accounting 5082 * was not selected. 5083 */ 5084 VM_WARN_ON_ONCE_FOLIO(!folio_test_hugetlb(old) && !memcg, old); 5085 if (!memcg) 5086 return; 5087 5088 /* Transfer the charge and the css ref */ 5089 commit_charge(new, memcg); 5090 5091 /* Warning should never happen, so don't worry about refcount non-0 */ 5092 WARN_ON_ONCE(folio_unqueue_deferred_split(old)); 5093 old->memcg_data = 0; 5094 } 5095 5096 DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key); 5097 EXPORT_SYMBOL(memcg_sockets_enabled_key); 5098 5099 void mem_cgroup_sk_alloc(struct sock *sk) 5100 { 5101 struct mem_cgroup *memcg; 5102 5103 if (!mem_cgroup_sockets_enabled) 5104 return; 5105 5106 /* Do not associate the sock with unrelated interrupted task's memcg. */ 5107 if (!in_task()) 5108 return; 5109 5110 rcu_read_lock(); 5111 memcg = mem_cgroup_from_task(current); 5112 if (mem_cgroup_is_root(memcg)) 5113 goto out; 5114 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg1_tcpmem_active(memcg)) 5115 goto out; 5116 if (css_tryget(&memcg->css)) 5117 sk->sk_memcg = memcg; 5118 out: 5119 rcu_read_unlock(); 5120 } 5121 5122 void mem_cgroup_sk_free(struct sock *sk) 5123 { 5124 struct mem_cgroup *memcg = mem_cgroup_from_sk(sk); 5125 5126 if (memcg) 5127 css_put(&memcg->css); 5128 } 5129 5130 void mem_cgroup_sk_inherit(const struct sock *sk, struct sock *newsk) 5131 { 5132 struct mem_cgroup *memcg; 5133 5134 if (sk->sk_memcg == newsk->sk_memcg) 5135 return; 5136 5137 mem_cgroup_sk_free(newsk); 5138 5139 memcg = mem_cgroup_from_sk(sk); 5140 if (memcg) 5141 css_get(&memcg->css); 5142 5143 newsk->sk_memcg = sk->sk_memcg; 5144 } 5145 5146 /** 5147 * mem_cgroup_sk_charge - charge socket memory 5148 * @sk: socket whose memcg to charge 5149 * @nr_pages: number of pages to charge 5150 * @gfp_mask: reclaim mode 5151 * 5152 * Charges @nr_pages to @sk's memcg. Returns %true if the charge fit within 5153 * the memcg's configured limit, %false if it doesn't.
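 *
 * Caller-side sketch from the network stack (loosely modelled on
 * __sk_mem_raise_allocated() in net/core/sock.c; the gfp choice and
 * error path here are assumptions, not the exact upstream code):
 *
 *	if (mem_cgroup_from_sk(sk) &&
 *	    !mem_cgroup_sk_charge(sk, nr_pages, GFP_NOWAIT))
 *		goto suppress_allocation;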
5154 */ 5155 bool mem_cgroup_sk_charge(const struct sock *sk, unsigned int nr_pages, 5156 gfp_t gfp_mask) 5157 { 5158 struct mem_cgroup *memcg = mem_cgroup_from_sk(sk); 5159 5160 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) 5161 return memcg1_charge_skmem(memcg, nr_pages, gfp_mask); 5162 5163 if (try_charge_memcg(memcg, gfp_mask, nr_pages) == 0) { 5164 mod_memcg_state(memcg, MEMCG_SOCK, nr_pages); 5165 return true; 5166 } 5167 5168 return false; 5169 } 5170 5171 /** 5172 * mem_cgroup_sk_uncharge - uncharge socket memory 5173 * @sk: socket in memcg to uncharge 5174 * @nr_pages: number of pages to uncharge 5175 */ 5176 void mem_cgroup_sk_uncharge(const struct sock *sk, unsigned int nr_pages) 5177 { 5178 struct mem_cgroup *memcg = mem_cgroup_from_sk(sk); 5179 5180 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) { 5181 memcg1_uncharge_skmem(memcg, nr_pages); 5182 return; 5183 } 5184 5185 mod_memcg_state(memcg, MEMCG_SOCK, -nr_pages); 5186 5187 refill_stock(memcg, nr_pages); 5188 } 5189 5190 void mem_cgroup_flush_workqueue(void) 5191 { 5192 flush_workqueue(memcg_wq); 5193 } 5194 5195 static int __init cgroup_memory(char *s) 5196 { 5197 char *token; 5198 5199 while ((token = strsep(&s, ",")) != NULL) { 5200 if (!*token) 5201 continue; 5202 if (!strcmp(token, "nosocket")) 5203 cgroup_memory_nosocket = true; 5204 if (!strcmp(token, "nokmem")) 5205 cgroup_memory_nokmem = true; 5206 if (!strcmp(token, "nobpf")) 5207 cgroup_memory_nobpf = true; 5208 } 5209 return 1; 5210 } 5211 __setup("cgroup.memory=", cgroup_memory); 5212 5213 /* 5214 * Memory controller init before cgroup_init() initialize root_mem_cgroup. 5215 * 5216 * Some parts like memcg_hotplug_cpu_dead() have to be initialized from this 5217 * context because of lock dependencies (cgroup_lock -> cpu hotplug) but 5218 * basically everything that doesn't depend on a specific mem_cgroup structure 5219 * should be initialized from here. 5220 */ 5221 int __init mem_cgroup_init(void) 5222 { 5223 unsigned int memcg_size; 5224 int cpu; 5225 5226 /* 5227 * Currently s32 type (can refer to struct batched_lruvec_stat) is 5228 * used for per-memcg-per-cpu caching of per-node statistics. In order 5229 * to work fine, we should make sure that the overfill threshold can't 5230 * exceed S32_MAX / PAGE_SIZE. 5231 */ 5232 BUILD_BUG_ON(MEMCG_CHARGE_BATCH > S32_MAX / PAGE_SIZE); 5233 5234 cpuhp_setup_state_nocalls(CPUHP_MM_MEMCQ_DEAD, "mm/memctrl:dead", NULL, 5235 memcg_hotplug_cpu_dead); 5236 5237 memcg_wq = alloc_workqueue("memcg", WQ_PERCPU, 0); 5238 WARN_ON(!memcg_wq); 5239 5240 for_each_possible_cpu(cpu) { 5241 INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work, 5242 drain_local_memcg_stock); 5243 INIT_WORK(&per_cpu_ptr(&obj_stock, cpu)->work, 5244 drain_local_obj_stock); 5245 } 5246 5247 memcg_size = struct_size_t(struct mem_cgroup, nodeinfo, nr_node_ids); 5248 memcg_cachep = kmem_cache_create("mem_cgroup", memcg_size, 0, 5249 SLAB_PANIC | SLAB_HWCACHE_ALIGN, NULL); 5250 5251 memcg_pn_cachep = KMEM_CACHE(mem_cgroup_per_node, 5252 SLAB_PANIC | SLAB_HWCACHE_ALIGN); 5253 5254 return 0; 5255 } 5256 5257 #ifdef CONFIG_SWAP 5258 /** 5259 * __mem_cgroup_try_charge_swap - try charging swap space for a folio 5260 * @folio: folio being added to swap 5261 * @entry: swap entry to charge 5262 * 5263 * Try to charge @folio's memcg for the swap space at @entry. 5264 * 5265 * Returns 0 on success, -ENOMEM on failure. 
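 *
 * Reached through the mem_cgroup_try_charge_swap() wrapper when a swap
 * entry is allocated for the folio, roughly (sketch based on
 * folio_alloc_swap(); simplified):
 *
 *	if (mem_cgroup_try_charge_swap(folio, entry)) {
 *		put_swap_folio(folio, entry);
 *		entry.val = 0;
 *	}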
5266 */ 5267 int __mem_cgroup_try_charge_swap(struct folio *folio, swp_entry_t entry) 5268 { 5269 unsigned int nr_pages = folio_nr_pages(folio); 5270 struct page_counter *counter; 5271 struct mem_cgroup *memcg; 5272 5273 if (do_memsw_account()) 5274 return 0; 5275 5276 memcg = folio_memcg(folio); 5277 5278 VM_WARN_ON_ONCE_FOLIO(!memcg, folio); 5279 if (!memcg) 5280 return 0; 5281 5282 if (!entry.val) { 5283 memcg_memory_event(memcg, MEMCG_SWAP_FAIL); 5284 return 0; 5285 } 5286 5287 memcg = mem_cgroup_private_id_get_online(memcg, nr_pages); 5288 5289 if (!mem_cgroup_is_root(memcg) && 5290 !page_counter_try_charge(&memcg->swap, nr_pages, &counter)) { 5291 memcg_memory_event(memcg, MEMCG_SWAP_MAX); 5292 memcg_memory_event(memcg, MEMCG_SWAP_FAIL); 5293 mem_cgroup_private_id_put(memcg, nr_pages); 5294 return -ENOMEM; 5295 } 5296 mod_memcg_state(memcg, MEMCG_SWAP, nr_pages); 5297 5298 swap_cgroup_record(folio, mem_cgroup_private_id(memcg), entry); 5299 5300 return 0; 5301 } 5302 5303 /** 5304 * __mem_cgroup_uncharge_swap - uncharge swap space 5305 * @entry: swap entry to uncharge 5306 * @nr_pages: the amount of swap space to uncharge 5307 */ 5308 void __mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages) 5309 { 5310 struct mem_cgroup *memcg; 5311 unsigned short id; 5312 5313 id = swap_cgroup_clear(entry, nr_pages); 5314 rcu_read_lock(); 5315 memcg = mem_cgroup_from_private_id(id); 5316 if (memcg) { 5317 if (!mem_cgroup_is_root(memcg)) { 5318 if (do_memsw_account()) 5319 page_counter_uncharge(&memcg->memsw, nr_pages); 5320 else 5321 page_counter_uncharge(&memcg->swap, nr_pages); 5322 } 5323 mod_memcg_state(memcg, MEMCG_SWAP, -nr_pages); 5324 mem_cgroup_private_id_put(memcg, nr_pages); 5325 } 5326 rcu_read_unlock(); 5327 } 5328 5329 long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg) 5330 { 5331 long nr_swap_pages = get_nr_swap_pages(); 5332 5333 if (mem_cgroup_disabled() || do_memsw_account()) 5334 return nr_swap_pages; 5335 for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg)) 5336 nr_swap_pages = min_t(long, nr_swap_pages, 5337 READ_ONCE(memcg->swap.max) - 5338 page_counter_read(&memcg->swap)); 5339 return nr_swap_pages; 5340 } 5341 5342 bool mem_cgroup_swap_full(struct folio *folio) 5343 { 5344 struct mem_cgroup *memcg; 5345 5346 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); 5347 5348 if (vm_swap_full()) 5349 return true; 5350 if (do_memsw_account()) 5351 return false; 5352 5353 memcg = folio_memcg(folio); 5354 if (!memcg) 5355 return false; 5356 5357 for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg)) { 5358 unsigned long usage = page_counter_read(&memcg->swap); 5359 5360 if (usage * 2 >= READ_ONCE(memcg->swap.high) || 5361 usage * 2 >= READ_ONCE(memcg->swap.max)) 5362 return true; 5363 } 5364 5365 return false; 5366 } 5367 5368 static int __init setup_swap_account(char *s) 5369 { 5370 bool res; 5371 5372 if (!kstrtobool(s, &res) && !res) 5373 pr_warn_once("The swapaccount=0 commandline option is deprecated " 5374 "in favor of configuring swap control via cgroupfs. 
" 5375 "Please report your usecase to linux-mm@kvack.org if you " 5376 "depend on this functionality.\n"); 5377 return 1; 5378 } 5379 __setup("swapaccount=", setup_swap_account); 5380 5381 static u64 swap_current_read(struct cgroup_subsys_state *css, 5382 struct cftype *cft) 5383 { 5384 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 5385 5386 return (u64)page_counter_read(&memcg->swap) * PAGE_SIZE; 5387 } 5388 5389 static int swap_peak_show(struct seq_file *sf, void *v) 5390 { 5391 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(sf)); 5392 5393 return peak_show(sf, v, &memcg->swap); 5394 } 5395 5396 static ssize_t swap_peak_write(struct kernfs_open_file *of, char *buf, 5397 size_t nbytes, loff_t off) 5398 { 5399 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 5400 5401 return peak_write(of, buf, nbytes, off, &memcg->swap, 5402 &memcg->swap_peaks); 5403 } 5404 5405 static int swap_high_show(struct seq_file *m, void *v) 5406 { 5407 return seq_puts_memcg_tunable(m, 5408 READ_ONCE(mem_cgroup_from_seq(m)->swap.high)); 5409 } 5410 5411 static ssize_t swap_high_write(struct kernfs_open_file *of, 5412 char *buf, size_t nbytes, loff_t off) 5413 { 5414 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 5415 unsigned long high; 5416 int err; 5417 5418 buf = strstrip(buf); 5419 err = page_counter_memparse(buf, "max", &high); 5420 if (err) 5421 return err; 5422 5423 page_counter_set_high(&memcg->swap, high); 5424 5425 return nbytes; 5426 } 5427 5428 static int swap_max_show(struct seq_file *m, void *v) 5429 { 5430 return seq_puts_memcg_tunable(m, 5431 READ_ONCE(mem_cgroup_from_seq(m)->swap.max)); 5432 } 5433 5434 static ssize_t swap_max_write(struct kernfs_open_file *of, 5435 char *buf, size_t nbytes, loff_t off) 5436 { 5437 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 5438 unsigned long max; 5439 int err; 5440 5441 buf = strstrip(buf); 5442 err = page_counter_memparse(buf, "max", &max); 5443 if (err) 5444 return err; 5445 5446 xchg(&memcg->swap.max, max); 5447 5448 return nbytes; 5449 } 5450 5451 static int swap_events_show(struct seq_file *m, void *v) 5452 { 5453 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 5454 5455 seq_printf(m, "high %lu\n", 5456 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_HIGH])); 5457 seq_printf(m, "max %lu\n", 5458 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_MAX])); 5459 seq_printf(m, "fail %lu\n", 5460 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_FAIL])); 5461 5462 return 0; 5463 } 5464 5465 static struct cftype swap_files[] = { 5466 { 5467 .name = "swap.current", 5468 .flags = CFTYPE_NOT_ON_ROOT, 5469 .read_u64 = swap_current_read, 5470 }, 5471 { 5472 .name = "swap.high", 5473 .flags = CFTYPE_NOT_ON_ROOT, 5474 .seq_show = swap_high_show, 5475 .write = swap_high_write, 5476 }, 5477 { 5478 .name = "swap.max", 5479 .flags = CFTYPE_NOT_ON_ROOT, 5480 .seq_show = swap_max_show, 5481 .write = swap_max_write, 5482 }, 5483 { 5484 .name = "swap.peak", 5485 .flags = CFTYPE_NOT_ON_ROOT, 5486 .open = peak_open, 5487 .release = peak_release, 5488 .seq_show = swap_peak_show, 5489 .write = swap_peak_write, 5490 }, 5491 { 5492 .name = "swap.events", 5493 .flags = CFTYPE_NOT_ON_ROOT, 5494 .file_offset = offsetof(struct mem_cgroup, swap_events_file), 5495 .seq_show = swap_events_show, 5496 }, 5497 { } /* terminate */ 5498 }; 5499 5500 #ifdef CONFIG_ZSWAP 5501 /** 5502 * obj_cgroup_may_zswap - check if this cgroup can zswap 5503 * @objcg: the object cgroup 5504 * 5505 * Check if the hierarchical zswap limit has been reached. 
5506 * 5507 * This doesn't check for specific headroom, and it is not atomic 5508 * either. But with zswap, the size of the allocation is only known 5509 * once compression has occurred, and this optimistic pre-check avoids 5510 * spending cycles on compression when there is already no room left 5511 * or zswap is disabled altogether somewhere in the hierarchy. 5512 */ 5513 bool obj_cgroup_may_zswap(struct obj_cgroup *objcg) 5514 { 5515 struct mem_cgroup *memcg, *original_memcg; 5516 bool ret = true; 5517 5518 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) 5519 return true; 5520 5521 original_memcg = get_mem_cgroup_from_objcg(objcg); 5522 for (memcg = original_memcg; !mem_cgroup_is_root(memcg); 5523 memcg = parent_mem_cgroup(memcg)) { 5524 unsigned long max = READ_ONCE(memcg->zswap_max); 5525 unsigned long pages; 5526 5527 if (max == PAGE_COUNTER_MAX) 5528 continue; 5529 if (max == 0) { 5530 ret = false; 5531 break; 5532 } 5533 5534 /* Force flush to get accurate stats for charging */ 5535 __mem_cgroup_flush_stats(memcg, true); 5536 pages = memcg_page_state(memcg, MEMCG_ZSWAP_B) / PAGE_SIZE; 5537 if (pages < max) 5538 continue; 5539 ret = false; 5540 break; 5541 } 5542 mem_cgroup_put(original_memcg); 5543 return ret; 5544 } 5545 5546 /** 5547 * obj_cgroup_charge_zswap - charge compression backend memory 5548 * @objcg: the object cgroup 5549 * @size: size of compressed object 5550 * 5551 * This forces the charge after obj_cgroup_may_zswap() allowed 5552 * compression and storage in zswap for this cgroup to go ahead. 5553 */ 5554 void obj_cgroup_charge_zswap(struct obj_cgroup *objcg, size_t size) 5555 { 5556 struct mem_cgroup *memcg; 5557 5558 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) 5559 return; 5560 5561 VM_WARN_ON_ONCE(!(current->flags & PF_MEMALLOC)); 5562 5563 /* PF_MEMALLOC context, charging must succeed */ 5564 if (obj_cgroup_charge(objcg, GFP_KERNEL, size)) 5565 VM_WARN_ON_ONCE(1); 5566 5567 rcu_read_lock(); 5568 memcg = obj_cgroup_memcg(objcg); 5569 mod_memcg_state(memcg, MEMCG_ZSWAP_B, size); 5570 mod_memcg_state(memcg, MEMCG_ZSWAPPED, 1); 5571 if (size == PAGE_SIZE) 5572 mod_memcg_state(memcg, MEMCG_ZSWAP_INCOMP, 1); 5573 rcu_read_unlock(); 5574 } 5575 5576 /** 5577 * obj_cgroup_uncharge_zswap - uncharge compression backend memory 5578 * @objcg: the object cgroup 5579 * @size: size of compressed object 5580 * 5581 * Uncharges zswap memory on page in. 
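 *
 * Pairs with obj_cgroup_charge_zswap() above. Caller-side sketch from
 * zswap entry teardown (loosely modelled on zswap_entry_free();
 * simplified):
 *
 *	if (entry->objcg) {
 *		obj_cgroup_uncharge_zswap(entry->objcg, entry->length);
 *		obj_cgroup_put(entry->objcg);
 *	}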
5582 */ 5583 void obj_cgroup_uncharge_zswap(struct obj_cgroup *objcg, size_t size) 5584 { 5585 struct mem_cgroup *memcg; 5586 5587 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) 5588 return; 5589 5590 obj_cgroup_uncharge(objcg, size); 5591 5592 rcu_read_lock(); 5593 memcg = obj_cgroup_memcg(objcg); 5594 mod_memcg_state(memcg, MEMCG_ZSWAP_B, -size); 5595 mod_memcg_state(memcg, MEMCG_ZSWAPPED, -1); 5596 if (size == PAGE_SIZE) 5597 mod_memcg_state(memcg, MEMCG_ZSWAP_INCOMP, -1); 5598 rcu_read_unlock(); 5599 } 5600 5601 bool mem_cgroup_zswap_writeback_enabled(struct mem_cgroup *memcg) 5602 { 5603 /* if zswap is disabled, do not block pages going to the swapping device */ 5604 if (!zswap_is_enabled()) 5605 return true; 5606 5607 for (; memcg; memcg = parent_mem_cgroup(memcg)) 5608 if (!READ_ONCE(memcg->zswap_writeback)) 5609 return false; 5610 5611 return true; 5612 } 5613 5614 static u64 zswap_current_read(struct cgroup_subsys_state *css, 5615 struct cftype *cft) 5616 { 5617 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 5618 5619 mem_cgroup_flush_stats(memcg); 5620 return memcg_page_state(memcg, MEMCG_ZSWAP_B); 5621 } 5622 5623 static int zswap_max_show(struct seq_file *m, void *v) 5624 { 5625 return seq_puts_memcg_tunable(m, 5626 READ_ONCE(mem_cgroup_from_seq(m)->zswap_max)); 5627 } 5628 5629 static ssize_t zswap_max_write(struct kernfs_open_file *of, 5630 char *buf, size_t nbytes, loff_t off) 5631 { 5632 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 5633 unsigned long max; 5634 int err; 5635 5636 buf = strstrip(buf); 5637 err = page_counter_memparse(buf, "max", &max); 5638 if (err) 5639 return err; 5640 5641 xchg(&memcg->zswap_max, max); 5642 5643 return nbytes; 5644 } 5645 5646 static int zswap_writeback_show(struct seq_file *m, void *v) 5647 { 5648 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 5649 5650 seq_printf(m, "%d\n", READ_ONCE(memcg->zswap_writeback)); 5651 return 0; 5652 } 5653 5654 static ssize_t zswap_writeback_write(struct kernfs_open_file *of, 5655 char *buf, size_t nbytes, loff_t off) 5656 { 5657 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 5658 int zswap_writeback; 5659 ssize_t parse_ret = kstrtoint(strstrip(buf), 0, &zswap_writeback); 5660 5661 if (parse_ret) 5662 return parse_ret; 5663 5664 if (zswap_writeback != 0 && zswap_writeback != 1) 5665 return -EINVAL; 5666 5667 WRITE_ONCE(memcg->zswap_writeback, zswap_writeback); 5668 return nbytes; 5669 } 5670 5671 static struct cftype zswap_files[] = { 5672 { 5673 .name = "zswap.current", 5674 .flags = CFTYPE_NOT_ON_ROOT, 5675 .read_u64 = zswap_current_read, 5676 }, 5677 { 5678 .name = "zswap.max", 5679 .flags = CFTYPE_NOT_ON_ROOT, 5680 .seq_show = zswap_max_show, 5681 .write = zswap_max_write, 5682 }, 5683 { 5684 .name = "zswap.writeback", 5685 .seq_show = zswap_writeback_show, 5686 .write = zswap_writeback_write, 5687 }, 5688 { } /* terminate */ 5689 }; 5690 #endif /* CONFIG_ZSWAP */ 5691 5692 static int __init mem_cgroup_swap_init(void) 5693 { 5694 if (mem_cgroup_disabled()) 5695 return 0; 5696 5697 WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys, swap_files)); 5698 #ifdef CONFIG_MEMCG_V1 5699 WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys, memsw_files)); 5700 #endif 5701 #ifdef CONFIG_ZSWAP 5702 WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys, zswap_files)); 5703 #endif 5704 return 0; 5705 } 5706 subsys_initcall(mem_cgroup_swap_init); 5707 5708 #endif /* CONFIG_SWAP */ 5709 5710 void mem_cgroup_node_filter_allowed(struct mem_cgroup *memcg, nodemask_t *mask) 5711 { 5712 
nodemask_t allowed; 5713 5714 if (!memcg) 5715 return; 5716 5717 /* 5718 * Since this interface is intended for use by migration paths, and 5719 * reclaim and migration are subject to race conditions such as changes 5720 * in effective_mems and hot-unplugging of nodes, an inaccurate allowed 5721 * mask is acceptable. 5722 */ 5723 cpuset_nodes_allowed(memcg->css.cgroup, &allowed); 5724 nodes_and(*mask, *mask, allowed); 5725 } 5726 5727 void mem_cgroup_show_protected_memory(struct mem_cgroup *memcg) 5728 { 5729 if (mem_cgroup_disabled() || !cgroup_subsys_on_dfl(memory_cgrp_subsys)) 5730 return; 5731 5732 if (!memcg) 5733 memcg = root_mem_cgroup; 5734 5735 pr_warn("Memory cgroup min protection %lukB -- low protection %lukB\n", 5736 K(atomic_long_read(&memcg->memory.children_min_usage)), 5737 K(atomic_long_read(&memcg->memory.children_low_usage))); 5738 }
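/*
 * Example of the line emitted above (the numbers are illustrative only):
 *
 *	Memory cgroup min protection 262144kB -- low protection 1048576kB
 */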