// SPDX-License-Identifier: GPL-2.0-or-later
/* memcontrol.c - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * Memory thresholds
 * Copyright (C) 2009 Nokia Corporation
 * Author: Kirill A. Shutemov
 *
 * Kernel Memory Controller
 * Copyright (C) 2012 Parallels Inc. and Google Inc.
 * Authors: Glauber Costa and Suleiman Souhlal
 *
 * Native page reclaim
 * Charge lifetime sanitation
 * Lockless page tracking & accounting
 * Unified hierarchy configuration model
 * Copyright (C) 2015 Red Hat, Inc., Johannes Weiner
 */

#include <linux/page_counter.h>
#include <linux/memcontrol.h>
#include <linux/cgroup.h>
#include <linux/pagewalk.h>
#include <linux/sched/mm.h>
#include <linux/shmem_fs.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/vm_event_item.h>
#include <linux/smp.h>
#include <linux/page-flags.h>
#include <linux/backing-dev.h>
#include <linux/bit_spinlock.h>
#include <linux/rcupdate.h>
#include <linux/limits.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/spinlock.h>
#include <linux/eventfd.h>
#include <linux/poll.h>
#include <linux/sort.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/vmpressure.h>
#include <linux/mm_inline.h>
#include <linux/swap_cgroup.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/lockdep.h>
#include <linux/file.h>
#include <linux/tracehook.h>
#include <linux/psi.h>
#include <linux/seq_buf.h>
#include "internal.h"
#include <net/sock.h>
#include <net/ip.h>
#include "slab.h"

#include <linux/uaccess.h>

#include <trace/events/vmscan.h>

struct cgroup_subsys memory_cgrp_subsys __read_mostly;
EXPORT_SYMBOL(memory_cgrp_subsys);

struct mem_cgroup *root_mem_cgroup __read_mostly;

/* Active memory cgroup to use from an interrupt context */
DEFINE_PER_CPU(struct mem_cgroup *, int_active_memcg);

/* Socket memory accounting disabled? */
static bool cgroup_memory_nosocket;

/* Kernel memory accounting disabled? */
static bool cgroup_memory_nokmem;

/* Whether the swap controller is active */
#ifdef CONFIG_MEMCG_SWAP
bool cgroup_memory_noswap __read_mostly;
#else
#define cgroup_memory_noswap		1
#endif

#ifdef CONFIG_CGROUP_WRITEBACK
static DECLARE_WAIT_QUEUE_HEAD(memcg_cgwb_frn_waitq);
#endif

/* Whether legacy memory+swap accounting is active */
static bool do_memsw_account(void)
{
	return !cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_noswap;
}

#define THRESHOLDS_EVENTS_TARGET 128
#define SOFTLIMIT_EVENTS_TARGET 1024

/*
 * Cgroups above their limits are maintained in a RB-Tree, independent of
 * their hierarchy representation
 */

struct mem_cgroup_tree_per_node {
	struct rb_root rb_root;
	struct rb_node *rb_rightmost;
	spinlock_t lock;
};

struct mem_cgroup_tree {
	struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
};

static struct mem_cgroup_tree soft_limit_tree __read_mostly;

/* for OOM */
struct mem_cgroup_eventfd_list {
	struct list_head list;
	struct eventfd_ctx *eventfd;
};

/*
 * cgroup_event represents events which userspace wants to receive.
 */
struct mem_cgroup_event {
	/*
	 * memcg which the event belongs to.
	 */
	struct mem_cgroup *memcg;
	/*
	 * eventfd to signal userspace about the event.
	 */
	struct eventfd_ctx *eventfd;
	/*
	 * Each of these is stored in a list by the cgroup.
	 */
	struct list_head list;
	/*
	 * register_event() callback will be used to add new userspace
	 * waiter for changes related to this event. Use eventfd_signal()
	 * on eventfd to send notification to userspace.
	 */
	int (*register_event)(struct mem_cgroup *memcg,
			      struct eventfd_ctx *eventfd, const char *args);
	/*
	 * unregister_event() callback will be called when userspace closes
	 * the eventfd or on cgroup removal. This callback must be set
	 * if you want to provide notification functionality.
	 */
	void (*unregister_event)(struct mem_cgroup *memcg,
				 struct eventfd_ctx *eventfd);
	/*
	 * All fields below needed to unregister event when
	 * userspace closes eventfd.
	 */
	poll_table pt;
	wait_queue_head_t *wqh;
	wait_queue_entry_t wait;
	struct work_struct remove;
};

static void mem_cgroup_threshold(struct mem_cgroup *memcg);
static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);

/* Stuff for move charges at task migration. */
/*
 * Types of charges to be moved.
 */
#define MOVE_ANON	0x1U
#define MOVE_FILE	0x2U
#define MOVE_MASK	(MOVE_ANON | MOVE_FILE)

/* "mc" and its members are protected by cgroup_mutex */
static struct move_charge_struct {
	spinlock_t	  lock; /* for from, to */
	struct mm_struct  *mm;
	struct mem_cgroup *from;
	struct mem_cgroup *to;
	unsigned long flags;
	unsigned long precharge;
	unsigned long moved_charge;
	unsigned long moved_swap;
	struct task_struct *moving_task;	/* a task moving charges */
	wait_queue_head_t waitq;		/* a waitq for other context */
} mc = {
	.lock = __SPIN_LOCK_UNLOCKED(mc.lock),
	.waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
};

/*
 * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
 * limit reclaim to prevent infinite loops, if they ever occur.
 */
#define	MEM_CGROUP_MAX_RECLAIM_LOOPS		100
#define	MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS	2

/* for encoding cft->private value on file */
enum res_type {
	_MEM,
	_MEMSWAP,
	_OOM_TYPE,
	_KMEM,
	_TCP,
};

#define MEMFILE_PRIVATE(x, val)	((x) << 16 | (val))
#define MEMFILE_TYPE(val)	((val) >> 16 & 0xffff)
#define MEMFILE_ATTR(val)	((val) & 0xffff)
/* Used for OOM notifier */
#define OOM_CONTROL		(0)
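
/*
 * Illustrative note: cft->private for a memory control file packs an
 * enum res_type in the upper 16 bits and a per-type attribute index in
 * the lower 16 bits, so MEMFILE_TYPE() and MEMFILE_ATTR() recover which
 * counter and which attribute a given control file operates on.
 */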

/*
 * Iteration constructs for visiting all cgroups (under a tree). If
 * loops are exited prematurely (break), mem_cgroup_iter_break() must
 * be used for reference counting.
 */
#define for_each_mem_cgroup_tree(iter, root)		\
	for (iter = mem_cgroup_iter(root, NULL, NULL);	\
	     iter != NULL;				\
	     iter = mem_cgroup_iter(root, iter, NULL))

#define for_each_mem_cgroup(iter)			\
	for (iter = mem_cgroup_iter(NULL, NULL, NULL);	\
	     iter != NULL;				\
	     iter = mem_cgroup_iter(NULL, iter, NULL))

static inline bool should_force_charge(void)
{
	return tsk_is_oom_victim(current) || fatal_signal_pending(current) ||
		(current->flags & PF_EXITING);
}

/* Some nice accessors for the vmpressure. */
struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
{
	if (!memcg)
		memcg = root_mem_cgroup;
	return &memcg->vmpressure;
}

struct cgroup_subsys_state *vmpressure_to_css(struct vmpressure *vmpr)
{
	return &container_of(vmpr, struct mem_cgroup, vmpressure)->css;
}

#ifdef CONFIG_MEMCG_KMEM
extern spinlock_t css_set_lock;

static void obj_cgroup_release(struct percpu_ref *ref)
{
	struct obj_cgroup *objcg = container_of(ref, struct obj_cgroup, refcnt);
	struct mem_cgroup *memcg;
	unsigned int nr_bytes;
	unsigned int nr_pages;
	unsigned long flags;

	/*
	 * At this point all allocated objects are freed, and
	 * objcg->nr_charged_bytes can't have an arbitrary byte value.
	 * However, it can be PAGE_SIZE or (x * PAGE_SIZE).
	 *
	 * The following sequence can lead to it:
	 * 1) CPU0: objcg == stock->cached_objcg
	 * 2) CPU1: we do a small allocation (e.g. 92 bytes),
	 *          PAGE_SIZE bytes are charged
	 * 3) CPU1: a process from another memcg is allocating something,
	 *          the stock is flushed,
	 *          objcg->nr_charged_bytes = PAGE_SIZE - 92
	 * 4) CPU0: we release this object,
	 *          92 bytes are added to stock->nr_bytes
	 * 5) CPU0: stock is flushed,
	 *          92 bytes are added to objcg->nr_charged_bytes
	 *
	 * As a result, nr_charged_bytes == PAGE_SIZE.
	 * This page will be uncharged in obj_cgroup_release().
	 */
	nr_bytes = atomic_read(&objcg->nr_charged_bytes);
	WARN_ON_ONCE(nr_bytes & (PAGE_SIZE - 1));
	nr_pages = nr_bytes >> PAGE_SHIFT;

	spin_lock_irqsave(&css_set_lock, flags);
	memcg = obj_cgroup_memcg(objcg);
	if (nr_pages)
		__memcg_kmem_uncharge(memcg, nr_pages);
	list_del(&objcg->list);
	mem_cgroup_put(memcg);
	spin_unlock_irqrestore(&css_set_lock, flags);

	percpu_ref_exit(ref);
	kfree_rcu(objcg, rcu);
}

static struct obj_cgroup *obj_cgroup_alloc(void)
{
	struct obj_cgroup *objcg;
	int ret;

	objcg = kzalloc(sizeof(struct obj_cgroup), GFP_KERNEL);
	if (!objcg)
		return NULL;

	ret = percpu_ref_init(&objcg->refcnt, obj_cgroup_release, 0,
			      GFP_KERNEL);
	if (ret) {
		kfree(objcg);
		return NULL;
	}
	INIT_LIST_HEAD(&objcg->list);
	return objcg;
}
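
/*
 * When @memcg goes away, hand its object cgroups over to @parent: the
 * currently active objcg and any objcgs already reparented to @memcg are
 * switched to point at @parent, so that slab objects which outlive the
 * cgroup keep being accounted to a live ancestor.
 */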
282 */ 283 nr_bytes = atomic_read(&objcg->nr_charged_bytes); 284 WARN_ON_ONCE(nr_bytes & (PAGE_SIZE - 1)); 285 nr_pages = nr_bytes >> PAGE_SHIFT; 286 287 spin_lock_irqsave(&css_set_lock, flags); 288 memcg = obj_cgroup_memcg(objcg); 289 if (nr_pages) 290 __memcg_kmem_uncharge(memcg, nr_pages); 291 list_del(&objcg->list); 292 mem_cgroup_put(memcg); 293 spin_unlock_irqrestore(&css_set_lock, flags); 294 295 percpu_ref_exit(ref); 296 kfree_rcu(objcg, rcu); 297 } 298 299 static struct obj_cgroup *obj_cgroup_alloc(void) 300 { 301 struct obj_cgroup *objcg; 302 int ret; 303 304 objcg = kzalloc(sizeof(struct obj_cgroup), GFP_KERNEL); 305 if (!objcg) 306 return NULL; 307 308 ret = percpu_ref_init(&objcg->refcnt, obj_cgroup_release, 0, 309 GFP_KERNEL); 310 if (ret) { 311 kfree(objcg); 312 return NULL; 313 } 314 INIT_LIST_HEAD(&objcg->list); 315 return objcg; 316 } 317 318 static void memcg_reparent_objcgs(struct mem_cgroup *memcg, 319 struct mem_cgroup *parent) 320 { 321 struct obj_cgroup *objcg, *iter; 322 323 objcg = rcu_replace_pointer(memcg->objcg, NULL, true); 324 325 spin_lock_irq(&css_set_lock); 326 327 /* Move active objcg to the parent's list */ 328 xchg(&objcg->memcg, parent); 329 css_get(&parent->css); 330 list_add(&objcg->list, &parent->objcg_list); 331 332 /* Move already reparented objcgs to the parent's list */ 333 list_for_each_entry(iter, &memcg->objcg_list, list) { 334 css_get(&parent->css); 335 xchg(&iter->memcg, parent); 336 css_put(&memcg->css); 337 } 338 list_splice(&memcg->objcg_list, &parent->objcg_list); 339 340 spin_unlock_irq(&css_set_lock); 341 342 percpu_ref_kill(&objcg->refcnt); 343 } 344 345 /* 346 * This will be used as a shrinker list's index. 347 * The main reason for not using cgroup id for this: 348 * this works better in sparse environments, where we have a lot of memcgs, 349 * but only a few kmem-limited. Or also, if we have, for instance, 200 350 * memcgs, and none but the 200th is kmem-limited, we'd have to have a 351 * 200 entry array for that. 352 * 353 * The current size of the caches array is stored in memcg_nr_cache_ids. It 354 * will double each time we have to increase it. 355 */ 356 static DEFINE_IDA(memcg_cache_ida); 357 int memcg_nr_cache_ids; 358 359 /* Protects memcg_nr_cache_ids */ 360 static DECLARE_RWSEM(memcg_cache_ids_sem); 361 362 void memcg_get_cache_ids(void) 363 { 364 down_read(&memcg_cache_ids_sem); 365 } 366 367 void memcg_put_cache_ids(void) 368 { 369 up_read(&memcg_cache_ids_sem); 370 } 371 372 /* 373 * MIN_SIZE is different than 1, because we would like to avoid going through 374 * the alloc/free process all the time. In a small machine, 4 kmem-limited 375 * cgroups is a reasonable guess. In the future, it could be a parameter or 376 * tunable, but that is strictly not necessary. 377 * 378 * MAX_SIZE should be as large as the number of cgrp_ids. Ideally, we could get 379 * this constant directly from cgroup, but it is understandable that this is 380 * better kept as an internal representation in cgroup.c. In any case, the 381 * cgrp_id space is not getting any smaller, and we don't have to necessarily 382 * increase ours as well if it increases. 383 */ 384 #define MEMCG_CACHES_MIN_SIZE 4 385 #define MEMCG_CACHES_MAX_SIZE MEM_CGROUP_ID_MAX 386 387 /* 388 * A lot of the calls to the cache allocation functions are expected to be 389 * inlined by the compiler. 
static int memcg_expand_one_shrinker_map(struct mem_cgroup *memcg,
					 int size, int old_size)
{
	struct memcg_shrinker_map *new, *old;
	int nid;

	lockdep_assert_held(&memcg_shrinker_map_mutex);

	for_each_node(nid) {
		old = rcu_dereference_protected(
			mem_cgroup_nodeinfo(memcg, nid)->shrinker_map, true);
		/* Not yet online memcg */
		if (!old)
			return 0;

		new = kvmalloc_node(sizeof(*new) + size, GFP_KERNEL, nid);
		if (!new)
			return -ENOMEM;

		/* Set all old bits, clear all new bits */
		memset(new->map, (int)0xff, old_size);
		memset((void *)new->map + old_size, 0, size - old_size);

		rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_map, new);
		call_rcu(&old->rcu, memcg_free_shrinker_map_rcu);
	}

	return 0;
}

static void memcg_free_shrinker_maps(struct mem_cgroup *memcg)
{
	struct mem_cgroup_per_node *pn;
	struct memcg_shrinker_map *map;
	int nid;

	if (mem_cgroup_is_root(memcg))
		return;

	for_each_node(nid) {
		pn = mem_cgroup_nodeinfo(memcg, nid);
		map = rcu_dereference_protected(pn->shrinker_map, true);
		if (map)
			kvfree(map);
		rcu_assign_pointer(pn->shrinker_map, NULL);
	}
}

static int memcg_alloc_shrinker_maps(struct mem_cgroup *memcg)
{
	struct memcg_shrinker_map *map;
	int nid, size, ret = 0;

	if (mem_cgroup_is_root(memcg))
		return 0;

	mutex_lock(&memcg_shrinker_map_mutex);
	size = memcg_shrinker_map_size;
	for_each_node(nid) {
		map = kvzalloc_node(sizeof(*map) + size, GFP_KERNEL, nid);
		if (!map) {
			memcg_free_shrinker_maps(memcg);
			ret = -ENOMEM;
			break;
		}
		rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_map, map);
	}
	mutex_unlock(&memcg_shrinker_map_mutex);

	return ret;
}

int memcg_expand_shrinker_maps(int new_id)
{
	int size, old_size, ret = 0;
	struct mem_cgroup *memcg;

	size = DIV_ROUND_UP(new_id + 1, BITS_PER_LONG) * sizeof(unsigned long);
	old_size = memcg_shrinker_map_size;
	if (size <= old_size)
		return 0;

	mutex_lock(&memcg_shrinker_map_mutex);
	if (!root_mem_cgroup)
		goto unlock;

	for_each_mem_cgroup(memcg) {
		if (mem_cgroup_is_root(memcg))
			continue;
		ret = memcg_expand_one_shrinker_map(memcg, size, old_size);
		if (ret) {
			mem_cgroup_iter_break(NULL, memcg);
			goto unlock;
		}
	}
unlock:
	if (!ret)
		memcg_shrinker_map_size = size;
	mutex_unlock(&memcg_shrinker_map_mutex);
	return ret;
}
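
/*
 * Record that @memcg on node @nid holds objects belonging to the shrinker
 * with id @shrinker_id, so that shrink_slab() will invoke that shrinker
 * for this memcg; the barrier pairs with the one in shrink_slab().
 */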
void memcg_set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id)
{
	if (shrinker_id >= 0 && memcg && !mem_cgroup_is_root(memcg)) {
		struct memcg_shrinker_map *map;

		rcu_read_lock();
		map = rcu_dereference(memcg->nodeinfo[nid]->shrinker_map);
		/* Pairs with smp mb in shrink_slab() */
		smp_mb__before_atomic();
		set_bit(shrinker_id, map->map);
		rcu_read_unlock();
	}
}

/**
 * mem_cgroup_css_from_page - css of the memcg associated with a page
 * @page: page of interest
 *
 * If memcg is bound to the default hierarchy, css of the memcg associated
 * with @page is returned.  The returned css remains associated with @page
 * until it is released.
 *
 * If memcg is bound to a traditional hierarchy, the css of root_mem_cgroup
 * is returned.
 */
struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page)
{
	struct mem_cgroup *memcg;

	memcg = page_memcg(page);

	if (!memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
		memcg = root_mem_cgroup;

	return &memcg->css;
}

/**
 * page_cgroup_ino - return inode number of the memcg a page is charged to
 * @page: the page
 *
 * Look up the closest online ancestor of the memory cgroup @page is charged to
 * and return its inode number or 0 if @page is not charged to any cgroup. It
 * is safe to call this function without holding a reference to @page.
 *
 * Note, this function is inherently racy, because there is nothing to prevent
 * the cgroup inode from getting torn down and potentially reallocated a moment
 * after page_cgroup_ino() returns, so it should only be used by callers that
 * do not care (such as procfs interfaces).
 */
ino_t page_cgroup_ino(struct page *page)
{
	struct mem_cgroup *memcg;
	unsigned long ino = 0;

	rcu_read_lock();
	memcg = page_memcg_check(page);

	while (memcg && !(memcg->css.flags & CSS_ONLINE))
		memcg = parent_mem_cgroup(memcg);
	if (memcg)
		ino = cgroup_ino(memcg->css.cgroup);
	rcu_read_unlock();
	return ino;
}

static struct mem_cgroup_per_node *
mem_cgroup_page_nodeinfo(struct mem_cgroup *memcg, struct page *page)
{
	int nid = page_to_nid(page);

	return memcg->nodeinfo[nid];
}

static struct mem_cgroup_tree_per_node *
soft_limit_tree_node(int nid)
{
	return soft_limit_tree.rb_tree_per_node[nid];
}

static struct mem_cgroup_tree_per_node *
soft_limit_tree_from_page(struct page *page)
{
	int nid = page_to_nid(page);

	return soft_limit_tree.rb_tree_per_node[nid];
}

static void __mem_cgroup_insert_exceeded(struct mem_cgroup_per_node *mz,
					 struct mem_cgroup_tree_per_node *mctz,
					 unsigned long new_usage_in_excess)
{
	struct rb_node **p = &mctz->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct mem_cgroup_per_node *mz_node;
	bool rightmost = true;

	if (mz->on_tree)
		return;

	mz->usage_in_excess = new_usage_in_excess;
	if (!mz->usage_in_excess)
		return;
	while (*p) {
		parent = *p;
		mz_node = rb_entry(parent, struct mem_cgroup_per_node,
					tree_node);
		if (mz->usage_in_excess < mz_node->usage_in_excess) {
			p = &(*p)->rb_left;
			rightmost = false;
		}

		/*
		 * We can't avoid mem cgroups that are over their soft
		 * limit by the same amount
		 */
		else if (mz->usage_in_excess >= mz_node->usage_in_excess)
			p = &(*p)->rb_right;
	}

	if (rightmost)
		mctz->rb_rightmost = &mz->tree_node;

	rb_link_node(&mz->tree_node, parent, p);
	rb_insert_color(&mz->tree_node, &mctz->rb_root);
	mz->on_tree = true;
}

static void __mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
					 struct mem_cgroup_tree_per_node *mctz)
{
	if (!mz->on_tree)
		return;

	if (&mz->tree_node == mctz->rb_rightmost)
		mctz->rb_rightmost = rb_prev(&mz->tree_node);

	rb_erase(&mz->tree_node, &mctz->rb_root);
	mz->on_tree = false;
}

static void mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
				       struct mem_cgroup_tree_per_node *mctz)
{
	unsigned long flags;

	spin_lock_irqsave(&mctz->lock, flags);
	__mem_cgroup_remove_exceeded(mz, mctz);
	spin_unlock_irqrestore(&mctz->lock, flags);
}
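
/* Return the number of pages by which usage exceeds the soft limit, or 0. */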
static unsigned long soft_limit_excess(struct mem_cgroup *memcg)
{
	unsigned long nr_pages = page_counter_read(&memcg->memory);
	unsigned long soft_limit = READ_ONCE(memcg->soft_limit);
	unsigned long excess = 0;

	if (nr_pages > soft_limit)
		excess = nr_pages - soft_limit;

	return excess;
}

static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page)
{
	unsigned long excess;
	struct mem_cgroup_per_node *mz;
	struct mem_cgroup_tree_per_node *mctz;

	mctz = soft_limit_tree_from_page(page);
	if (!mctz)
		return;
	/*
	 * Necessary to update all ancestors when hierarchy is used,
	 * because their event counter is not touched.
	 */
	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
		mz = mem_cgroup_page_nodeinfo(memcg, page);
		excess = soft_limit_excess(memcg);
		/*
		 * We have to update the tree if mz is on RB-tree or
		 * mem is over its softlimit.
		 */
		if (excess || mz->on_tree) {
			unsigned long flags;

			spin_lock_irqsave(&mctz->lock, flags);
			/* if on-tree, remove it */
			if (mz->on_tree)
				__mem_cgroup_remove_exceeded(mz, mctz);
			/*
			 * Insert again. mz->usage_in_excess will be updated.
			 * If excess is 0, no tree ops.
			 */
			__mem_cgroup_insert_exceeded(mz, mctz, excess);
			spin_unlock_irqrestore(&mctz->lock, flags);
		}
	}
}

static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
{
	struct mem_cgroup_tree_per_node *mctz;
	struct mem_cgroup_per_node *mz;
	int nid;

	for_each_node(nid) {
		mz = mem_cgroup_nodeinfo(memcg, nid);
		mctz = soft_limit_tree_node(nid);
		if (mctz)
			mem_cgroup_remove_exceeded(mz, mctz);
	}
}
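
/*
 * Pick the memcg furthest over its soft limit (the rightmost node in the
 * tree), take it off the tree and grab a css reference; the caller puts
 * it back in its new position once reclaim has run.
 */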
static struct mem_cgroup_per_node *
__mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
{
	struct mem_cgroup_per_node *mz;

retry:
	mz = NULL;
	if (!mctz->rb_rightmost)
		goto done;		/* Nothing to reclaim from */

	mz = rb_entry(mctz->rb_rightmost,
		      struct mem_cgroup_per_node, tree_node);
	/*
	 * Remove the node now but someone else can add it back,
	 * we will add it back at the end of reclaim to its correct
	 * position in the tree.
	 */
	__mem_cgroup_remove_exceeded(mz, mctz);
	if (!soft_limit_excess(mz->memcg) ||
	    !css_tryget(&mz->memcg->css))
		goto retry;
done:
	return mz;
}

static struct mem_cgroup_per_node *
mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
{
	struct mem_cgroup_per_node *mz;

	spin_lock_irq(&mctz->lock);
	mz = __mem_cgroup_largest_soft_limit_node(mctz);
	spin_unlock_irq(&mctz->lock);
	return mz;
}

/**
 * __mod_memcg_state - update cgroup memory statistics
 * @memcg: the memory cgroup
 * @idx: the stat item - can be enum memcg_stat_item or enum node_stat_item
 * @val: delta to add to the counter, can be negative
 */
void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val)
{
	long x, threshold = MEMCG_CHARGE_BATCH;

	if (mem_cgroup_disabled())
		return;

	if (memcg_stat_item_in_bytes(idx))
		threshold <<= PAGE_SHIFT;

	x = val + __this_cpu_read(memcg->vmstats_percpu->stat[idx]);
	if (unlikely(abs(x) > threshold)) {
		struct mem_cgroup *mi;

		/*
		 * Batch local counters to keep them in sync with
		 * the hierarchical ones.
		 */
		__this_cpu_add(memcg->vmstats_local->stat[idx], x);
		for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
			atomic_long_add(x, &mi->vmstats[idx]);
		x = 0;
	}
	__this_cpu_write(memcg->vmstats_percpu->stat[idx], x);
}

static struct mem_cgroup_per_node *
parent_nodeinfo(struct mem_cgroup_per_node *pn, int nid)
{
	struct mem_cgroup *parent;

	parent = parent_mem_cgroup(pn->memcg);
	if (!parent)
		return NULL;
	return mem_cgroup_nodeinfo(parent, nid);
}

void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
			      int val)
{
	struct mem_cgroup_per_node *pn;
	struct mem_cgroup *memcg;
	long x, threshold = MEMCG_CHARGE_BATCH;

	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	memcg = pn->memcg;

	/* Update memcg */
	__mod_memcg_state(memcg, idx, val);

	/* Update lruvec */
	__this_cpu_add(pn->lruvec_stat_local->count[idx], val);

	if (vmstat_item_in_bytes(idx))
		threshold <<= PAGE_SHIFT;

	x = val + __this_cpu_read(pn->lruvec_stat_cpu->count[idx]);
	if (unlikely(abs(x) > threshold)) {
		pg_data_t *pgdat = lruvec_pgdat(lruvec);
		struct mem_cgroup_per_node *pi;

		for (pi = pn; pi; pi = parent_nodeinfo(pi, pgdat->node_id))
			atomic_long_add(x, &pi->lruvec_stat[idx]);
		x = 0;
	}
	__this_cpu_write(pn->lruvec_stat_cpu->count[idx], x);
}

/**
 * __mod_lruvec_state - update lruvec memory statistics
 * @lruvec: the lruvec
 * @idx: the stat item
 * @val: delta to add to the counter, can be negative
 *
 * The lruvec is the intersection of the NUMA node and a cgroup. This
 * function updates all three counters that are affected by a
 * change of state at this level: per-node, per-cgroup, per-lruvec.
 */
void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
			int val)
{
	/* Update node */
	__mod_node_page_state(lruvec_pgdat(lruvec), idx, val);

	/* Update memcg and lruvec */
	if (!mem_cgroup_disabled())
		__mod_memcg_lruvec_state(lruvec, idx, val);
}

void __mod_lruvec_slab_state(void *p, enum node_stat_item idx, int val)
{
	pg_data_t *pgdat = page_pgdat(virt_to_page(p));
	struct mem_cgroup *memcg;
	struct lruvec *lruvec;

	rcu_read_lock();
	memcg = mem_cgroup_from_obj(p);

	/*
	 * Untracked pages have no memcg, no lruvec. Update only the
	 * node. If we reparent the slab objects to the root memcg,
	 * when we free the slab object, we need to update the per-memcg
	 * vmstats to keep it correct for the root memcg.
	 */
	if (!memcg) {
		__mod_node_page_state(pgdat, idx, val);
	} else {
		lruvec = mem_cgroup_lruvec(memcg, pgdat);
		__mod_lruvec_state(lruvec, idx, val);
	}
	rcu_read_unlock();
}

void mod_memcg_obj_state(void *p, int idx, int val)
{
	struct mem_cgroup *memcg;

	rcu_read_lock();
	memcg = mem_cgroup_from_obj(p);
	if (memcg)
		mod_memcg_state(memcg, idx, val);
	rcu_read_unlock();
}

/**
 * __count_memcg_events - account VM events in a cgroup
 * @memcg: the memory cgroup
 * @idx: the event item
 * @count: the number of events that occurred
 */
void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
			  unsigned long count)
{
	unsigned long x;

	if (mem_cgroup_disabled())
		return;

	x = count + __this_cpu_read(memcg->vmstats_percpu->events[idx]);
	if (unlikely(x > MEMCG_CHARGE_BATCH)) {
		struct mem_cgroup *mi;

		/*
		 * Batch local counters to keep them in sync with
		 * the hierarchical ones.
		 */
		__this_cpu_add(memcg->vmstats_local->events[idx], x);
		for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
			atomic_long_add(x, &mi->vmevents[idx]);
		x = 0;
	}
	__this_cpu_write(memcg->vmstats_percpu->events[idx], x);
}

static unsigned long memcg_events(struct mem_cgroup *memcg, int event)
{
	return atomic_long_read(&memcg->vmevents[event]);
}

static unsigned long memcg_events_local(struct mem_cgroup *memcg, int event)
{
	long x = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		x += per_cpu(memcg->vmstats_local->events[event], cpu);
	return x;
}
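
/*
 * Account a charge (@nr_pages > 0) or uncharge (@nr_pages < 0) as one
 * PGPGIN/PGPGOUT event and add the page count to nr_page_events, which
 * drives the threshold and soft limit event targets.
 */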
static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
					 struct page *page,
					 int nr_pages)
{
	/* pagein of a big page is an event. So, ignore page size */
	if (nr_pages > 0)
		__count_memcg_events(memcg, PGPGIN, 1);
	else {
		__count_memcg_events(memcg, PGPGOUT, 1);
		nr_pages = -nr_pages; /* for event */
	}

	__this_cpu_add(memcg->vmstats_percpu->nr_page_events, nr_pages);
}

static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
				       enum mem_cgroup_events_target target)
{
	unsigned long val, next;

	val = __this_cpu_read(memcg->vmstats_percpu->nr_page_events);
	next = __this_cpu_read(memcg->vmstats_percpu->targets[target]);
	/* from time_after() in jiffies.h */
	if ((long)(next - val) < 0) {
		switch (target) {
		case MEM_CGROUP_TARGET_THRESH:
			next = val + THRESHOLDS_EVENTS_TARGET;
			break;
		case MEM_CGROUP_TARGET_SOFTLIMIT:
			next = val + SOFTLIMIT_EVENTS_TARGET;
			break;
		default:
			break;
		}
		__this_cpu_write(memcg->vmstats_percpu->targets[target], next);
		return true;
	}
	return false;
}

/*
 * Check events in order.
 */
static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
{
	/* threshold event is triggered in finer grain than soft limit */
	if (unlikely(mem_cgroup_event_ratelimit(memcg,
						MEM_CGROUP_TARGET_THRESH))) {
		bool do_softlimit;

		do_softlimit = mem_cgroup_event_ratelimit(memcg,
						MEM_CGROUP_TARGET_SOFTLIMIT);
		mem_cgroup_threshold(memcg);
		if (unlikely(do_softlimit))
			mem_cgroup_update_tree(memcg, page);
	}
}

struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
{
	/*
	 * mm_update_next_owner() may clear mm->owner to NULL
	 * if it races with swapoff, page migration, etc.
	 * So this can be called with p == NULL.
	 */
	if (unlikely(!p))
		return NULL;

	return mem_cgroup_from_css(task_css(p, memory_cgrp_id));
}
EXPORT_SYMBOL(mem_cgroup_from_task);

/**
 * get_mem_cgroup_from_mm: Obtain a reference on given mm_struct's memcg.
 * @mm: mm from which memcg should be extracted. It can be NULL.
 *
 * Obtain a reference on mm->memcg and return it if successful. Otherwise
 * root_mem_cgroup is returned. However if mem_cgroup is disabled, NULL is
 * returned.
 */
struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return NULL;

	rcu_read_lock();
	do {
		/*
		 * Page cache insertions can happen without an
		 * actual mm context, e.g. during disk probing
		 * on boot, loopback IO, acct() writes etc.
		 */
		if (unlikely(!mm))
			memcg = root_mem_cgroup;
		else {
			memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
			if (unlikely(!memcg))
				memcg = root_mem_cgroup;
		}
	} while (!css_tryget(&memcg->css));
	rcu_read_unlock();
	return memcg;
}
EXPORT_SYMBOL(get_mem_cgroup_from_mm);

/**
 * get_mem_cgroup_from_page: Obtain a reference on given page's memcg.
 * @page: page from which memcg should be extracted.
 *
 * Obtain a reference on page->memcg and return it if successful. Otherwise
 * root_mem_cgroup is returned.
 */
struct mem_cgroup *get_mem_cgroup_from_page(struct page *page)
{
	struct mem_cgroup *memcg = page_memcg(page);

	if (mem_cgroup_disabled())
		return NULL;

	rcu_read_lock();
	/* Page should not get uncharged and freed memcg under us. */
	if (!memcg || WARN_ON_ONCE(!css_tryget(&memcg->css)))
		memcg = root_mem_cgroup;
	rcu_read_unlock();
	return memcg;
}
EXPORT_SYMBOL(get_mem_cgroup_from_page);
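
/*
 * Return the memcg that has been explicitly set as the charge target for
 * the current context: the per-CPU int_active_memcg when in interrupt
 * context, current->active_memcg otherwise. NULL means no override.
 */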
static __always_inline struct mem_cgroup *active_memcg(void)
{
	if (in_interrupt())
		return this_cpu_read(int_active_memcg);
	else
		return current->active_memcg;
}

static __always_inline struct mem_cgroup *get_active_memcg(void)
{
	struct mem_cgroup *memcg;

	rcu_read_lock();
	memcg = active_memcg();
	if (memcg) {
		/* current->active_memcg must hold a ref. */
		if (WARN_ON_ONCE(!css_tryget(&memcg->css)))
			memcg = root_mem_cgroup;
		else
			memcg = current->active_memcg;
	}
	rcu_read_unlock();

	return memcg;
}

static __always_inline bool memcg_kmem_bypass(void)
{
	/* Allow remote memcg charging from any context. */
	if (unlikely(active_memcg()))
		return false;

	/* Memcg to charge can't be determined. */
	if (in_interrupt() || !current->mm || (current->flags & PF_KTHREAD))
		return true;

	return false;
}

/*
 * If active memcg is set, do not fall back to current->mm->memcg.
 */
static __always_inline struct mem_cgroup *get_mem_cgroup_from_current(void)
{
	if (memcg_kmem_bypass())
		return NULL;

	if (unlikely(active_memcg()))
		return get_active_memcg();

	return get_mem_cgroup_from_mm(current->mm);
}

/**
 * mem_cgroup_iter - iterate over memory cgroup hierarchy
 * @root: hierarchy root
 * @prev: previously returned memcg, NULL on first invocation
 * @reclaim: cookie for shared reclaim walks, NULL for full walks
 *
 * Returns references to children of the hierarchy below @root, or
 * @root itself, or %NULL after a full round-trip.
 *
 * Caller must pass the return value in @prev on subsequent
 * invocations for reference counting, or use mem_cgroup_iter_break()
 * to cancel a hierarchy walk before the round-trip is complete.
 *
 * Reclaimers can specify a node in @reclaim to divide up the memcgs
 * in the hierarchy among all concurrent reclaimers operating on the
 * same node.
 */
struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
				   struct mem_cgroup *prev,
				   struct mem_cgroup_reclaim_cookie *reclaim)
{
	struct mem_cgroup_reclaim_iter *iter;
	struct cgroup_subsys_state *css = NULL;
	struct mem_cgroup *memcg = NULL;
	struct mem_cgroup *pos = NULL;

	if (mem_cgroup_disabled())
		return NULL;

	if (!root)
		root = root_mem_cgroup;

	if (prev && !reclaim)
		pos = prev;

	if (!root->use_hierarchy && root != root_mem_cgroup) {
		if (prev)
			goto out;
		return root;
	}

	rcu_read_lock();

	if (reclaim) {
		struct mem_cgroup_per_node *mz;

		mz = mem_cgroup_nodeinfo(root, reclaim->pgdat->node_id);
		iter = &mz->iter;

		if (prev && reclaim->generation != iter->generation)
			goto out_unlock;

		while (1) {
			pos = READ_ONCE(iter->position);
			if (!pos || css_tryget(&pos->css))
				break;
			/*
			 * css reference reached zero, so iter->position will
			 * be cleared by ->css_released. However, we should not
			 * rely on this happening soon, because ->css_released
			 * is called from a work queue, and by busy-waiting we
			 * might block it. So we clear iter->position right
			 * away.
			 */
			(void)cmpxchg(&iter->position, pos, NULL);
		}
	}

	if (pos)
		css = &pos->css;

	for (;;) {
		css = css_next_descendant_pre(css, &root->css);
		if (!css) {
			/*
			 * Reclaimers share the hierarchy walk, and a
			 * new one might jump in right at the end of
			 * the hierarchy - make sure they see at least
			 * one group and restart from the beginning.
			 */
			if (!prev)
				continue;
			break;
		}

		/*
		 * Verify the css and acquire a reference. The root
		 * is provided by the caller, so we know it's alive
		 * and kicking, and don't take an extra reference.
		 */
		memcg = mem_cgroup_from_css(css);

		if (css == &root->css)
			break;

		if (css_tryget(css))
			break;

		memcg = NULL;
	}

	if (reclaim) {
		/*
		 * The position could have already been updated by a competing
		 * thread, so check that the value hasn't changed since we read
		 * it to avoid reclaiming from the same cgroup twice.
		 */
		(void)cmpxchg(&iter->position, pos, memcg);

		if (pos)
			css_put(&pos->css);

		if (!memcg)
			iter->generation++;
		else if (!prev)
			reclaim->generation = iter->generation;
	}

out_unlock:
	rcu_read_unlock();
out:
	if (prev && prev != root)
		css_put(&prev->css);

	return memcg;
}

/**
 * mem_cgroup_iter_break - abort a hierarchy walk prematurely
 * @root: hierarchy root
 * @prev: last visited hierarchy member as returned by mem_cgroup_iter()
 */
void mem_cgroup_iter_break(struct mem_cgroup *root,
			   struct mem_cgroup *prev)
{
	if (!root)
		root = root_mem_cgroup;
	if (prev && prev != root)
		css_put(&prev->css);
}

static void __invalidate_reclaim_iterators(struct mem_cgroup *from,
					struct mem_cgroup *dead_memcg)
{
	struct mem_cgroup_reclaim_iter *iter;
	struct mem_cgroup_per_node *mz;
	int nid;

	for_each_node(nid) {
		mz = mem_cgroup_nodeinfo(from, nid);
		iter = &mz->iter;
		cmpxchg(&iter->position, dead_memcg, NULL);
	}
}

static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
{
	struct mem_cgroup *memcg = dead_memcg;
	struct mem_cgroup *last;

	do {
		__invalidate_reclaim_iterators(memcg, dead_memcg);
		last = memcg;
	} while ((memcg = parent_mem_cgroup(memcg)));

	/*
	 * When cgroup1 non-hierarchy mode is used,
	 * parent_mem_cgroup() does not walk all the way up to the
	 * cgroup root (root_mem_cgroup). So we have to handle
	 * dead_memcg from cgroup root separately.
	 */
	if (last != root_mem_cgroup)
		__invalidate_reclaim_iterators(root_mem_cgroup,
						dead_memcg);
}

/**
 * mem_cgroup_scan_tasks - iterate over tasks of a memory cgroup hierarchy
 * @memcg: hierarchy root
 * @fn: function to call for each task
 * @arg: argument passed to @fn
 *
 * This function iterates over tasks attached to @memcg or to any of its
 * descendants and calls @fn for each task. If @fn returns a non-zero
 * value, the function breaks the iteration loop and returns the value.
 * Otherwise, it will iterate over all tasks and return 0.
 *
 * This function must not be called for the root memory cgroup.
 */
int mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
			  int (*fn)(struct task_struct *, void *), void *arg)
{
	struct mem_cgroup *iter;
	int ret = 0;

	BUG_ON(memcg == root_mem_cgroup);

	for_each_mem_cgroup_tree(iter, memcg) {
		struct css_task_iter it;
		struct task_struct *task;

		css_task_iter_start(&iter->css, CSS_TASK_ITER_PROCS, &it);
		while (!ret && (task = css_task_iter_next(&it)))
			ret = fn(task, arg);
		css_task_iter_end(&it);
		if (ret) {
			mem_cgroup_iter_break(memcg, iter);
			break;
		}
	}
	return ret;
}

/**
 * mem_cgroup_page_lruvec - return lruvec for isolating/putting an LRU page
 * @page: the page
 * @pgdat: pgdat of the page
 *
 * This function relies on page->mem_cgroup being stable - see the
 * access rules in commit_charge().
 */
struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct pglist_data *pgdat)
{
	struct mem_cgroup_per_node *mz;
	struct mem_cgroup *memcg;
	struct lruvec *lruvec;

	if (mem_cgroup_disabled()) {
		lruvec = &pgdat->__lruvec;
		goto out;
	}

	memcg = page_memcg(page);
	/*
	 * Swapcache readahead pages are added to the LRU - and
	 * possibly migrated - before they are charged.
	 */
	if (!memcg)
		memcg = root_mem_cgroup;

	mz = mem_cgroup_page_nodeinfo(memcg, page);
	lruvec = &mz->lruvec;
out:
	/*
	 * Since a node can be onlined after the mem_cgroup was created,
	 * we have to be prepared to initialize lruvec->pgdat here;
	 * and if offlined then reonlined, we need to reinitialize it.
	 */
	if (unlikely(lruvec->pgdat != pgdat))
		lruvec->pgdat = pgdat;
	return lruvec;
}

/**
 * mem_cgroup_update_lru_size - account for adding or removing an lru page
 * @lruvec: mem_cgroup per zone lru vector
 * @lru: index of lru list the page is sitting on
 * @zid: zone id of the accounted pages
 * @nr_pages: positive when adding or negative when removing
 *
 * This function must be called under lru_lock, just before a page is added
 * to or just after a page is removed from an lru list (that ordering being
 * so as to allow it to check that lru_size 0 is consistent with list_empty).
 */
void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
				int zid, int nr_pages)
{
	struct mem_cgroup_per_node *mz;
	unsigned long *lru_size;
	long size;

	if (mem_cgroup_disabled())
		return;

	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	lru_size = &mz->lru_zone_size[zid][lru];

	if (nr_pages < 0)
		*lru_size += nr_pages;

	size = *lru_size;
	if (WARN_ONCE(size < 0,
		"%s(%p, %d, %d): lru_size %ld\n",
		__func__, lruvec, lru, nr_pages, size)) {
		VM_BUG_ON(1);
		*lru_size = 0;
	}

	if (nr_pages > 0)
		*lru_size += nr_pages;
}

/**
 * mem_cgroup_margin - calculate chargeable space of a memory cgroup
 * @memcg: the memory cgroup
 *
 * Returns the maximum amount of memory @memcg can be charged with, in
 * pages.
 */
static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
{
	unsigned long margin = 0;
	unsigned long count;
	unsigned long limit;

	count = page_counter_read(&memcg->memory);
	limit = READ_ONCE(memcg->memory.max);
	if (count < limit)
		margin = limit - count;

	if (do_memsw_account()) {
		count = page_counter_read(&memcg->memsw);
		limit = READ_ONCE(memcg->memsw.max);
		if (count < limit)
			margin = min(margin, limit - count);
		else
			margin = 0;
	}

	return margin;
}

/*
 * A routine for checking whether "mem" is under move_account() or not.
 *
 * Checking a cgroup is mc.from or mc.to or under hierarchy of
 * moving cgroups. This is for waiting at high memory pressure
 * caused by "move".
 */
static bool mem_cgroup_under_move(struct mem_cgroup *memcg)
{
	struct mem_cgroup *from;
	struct mem_cgroup *to;
	bool ret = false;
	/*
	 * Unlike task_move routines, we access mc.to, mc.from not under
	 * mutual exclusion by cgroup_mutex. Here, we take spinlock instead.
	 */
	spin_lock(&mc.lock);
	from = mc.from;
	to = mc.to;
	if (!from)
		goto unlock;

	ret = mem_cgroup_is_descendant(from, memcg) ||
		mem_cgroup_is_descendant(to, memcg);
unlock:
	spin_unlock(&mc.lock);
	return ret;
}

static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg)
{
	if (mc.moving_task && current != mc.moving_task) {
		if (mem_cgroup_under_move(memcg)) {
			DEFINE_WAIT(wait);
			prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE);
			/* moving charge context might have finished. */
			if (mc.moving_task)
				schedule();
			finish_wait(&mc.waitq, &wait);
			return true;
		}
	}
	return false;
}

struct memory_stat {
	const char *name;
	unsigned int ratio;
	unsigned int idx;
};
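
/*
 * Table driving memory.stat output: each entry names an output line, the
 * counter it is read from, and the ratio that converts the counter's
 * unit (pages, KB, or bytes) into bytes.
 */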
static struct memory_stat memory_stats[] = {
	{ "anon", PAGE_SIZE, NR_ANON_MAPPED },
	{ "file", PAGE_SIZE, NR_FILE_PAGES },
	{ "kernel_stack", 1024, NR_KERNEL_STACK_KB },
	{ "percpu", 1, MEMCG_PERCPU_B },
	{ "sock", PAGE_SIZE, MEMCG_SOCK },
	{ "shmem", PAGE_SIZE, NR_SHMEM },
	{ "file_mapped", PAGE_SIZE, NR_FILE_MAPPED },
	{ "file_dirty", PAGE_SIZE, NR_FILE_DIRTY },
	{ "file_writeback", PAGE_SIZE, NR_WRITEBACK },
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	/*
	 * The ratio will be initialized in memory_stats_init(), because
	 * on some architectures (e.g. powerpc) HPAGE_PMD_SIZE is not a
	 * compile-time constant.
	 */
	{ "anon_thp", 0, NR_ANON_THPS },
#endif
	{ "inactive_anon", PAGE_SIZE, NR_INACTIVE_ANON },
	{ "active_anon", PAGE_SIZE, NR_ACTIVE_ANON },
	{ "inactive_file", PAGE_SIZE, NR_INACTIVE_FILE },
	{ "active_file", PAGE_SIZE, NR_ACTIVE_FILE },
	{ "unevictable", PAGE_SIZE, NR_UNEVICTABLE },

	/*
	 * Note: The slab_reclaimable and slab_unreclaimable entries must
	 * appear together, with slab_reclaimable first.
	 */
	{ "slab_reclaimable", 1, NR_SLAB_RECLAIMABLE_B },
	{ "slab_unreclaimable", 1, NR_SLAB_UNRECLAIMABLE_B },

	/* The memory events */
	{ "workingset_refault_anon", 1, WORKINGSET_REFAULT_ANON },
	{ "workingset_refault_file", 1, WORKINGSET_REFAULT_FILE },
	{ "workingset_activate_anon", 1, WORKINGSET_ACTIVATE_ANON },
	{ "workingset_activate_file", 1, WORKINGSET_ACTIVATE_FILE },
	{ "workingset_restore_anon", 1, WORKINGSET_RESTORE_ANON },
	{ "workingset_restore_file", 1, WORKINGSET_RESTORE_FILE },
	{ "workingset_nodereclaim", 1, WORKINGSET_NODERECLAIM },
};

static int __init memory_stats_init(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(memory_stats); i++) {
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
		if (memory_stats[i].idx == NR_ANON_THPS)
			memory_stats[i].ratio = HPAGE_PMD_SIZE;
#endif
		VM_BUG_ON(!memory_stats[i].ratio);
		VM_BUG_ON(memory_stats[i].idx >= MEMCG_NR_STAT);
	}

	return 0;
}
pure_initcall(memory_stats_init);

static char *memory_stat_format(struct mem_cgroup *memcg)
{
	struct seq_buf s;
	int i;

	seq_buf_init(&s, kmalloc(PAGE_SIZE, GFP_KERNEL), PAGE_SIZE);
	if (!s.buffer)
		return NULL;

	/*
	 * Provide statistics on the state of the memory subsystem as
	 * well as cumulative event counters that show past behavior.
	 *
	 * This list is ordered following a combination of these gradients:
	 * 1) generic big picture -> specifics and details
	 * 2) reflecting userspace activity -> reflecting kernel heuristics
	 *
	 * Current memory state:
	 */

	for (i = 0; i < ARRAY_SIZE(memory_stats); i++) {
		u64 size;

		size = memcg_page_state(memcg, memory_stats[i].idx);
		size *= memory_stats[i].ratio;
		seq_buf_printf(&s, "%s %llu\n", memory_stats[i].name, size);

		if (unlikely(memory_stats[i].idx == NR_SLAB_UNRECLAIMABLE_B)) {
			size = memcg_page_state(memcg, NR_SLAB_RECLAIMABLE_B) +
			       memcg_page_state(memcg, NR_SLAB_UNRECLAIMABLE_B);
			seq_buf_printf(&s, "slab %llu\n", size);
		}
	}

	/* Accumulated memory events */

	seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGFAULT),
		       memcg_events(memcg, PGFAULT));
	seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGMAJFAULT),
		       memcg_events(memcg, PGMAJFAULT));
	seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGREFILL),
		       memcg_events(memcg, PGREFILL));
	seq_buf_printf(&s, "pgscan %lu\n",
		       memcg_events(memcg, PGSCAN_KSWAPD) +
		       memcg_events(memcg, PGSCAN_DIRECT));
	seq_buf_printf(&s, "pgsteal %lu\n",
		       memcg_events(memcg, PGSTEAL_KSWAPD) +
		       memcg_events(memcg, PGSTEAL_DIRECT));
	seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGACTIVATE),
		       memcg_events(memcg, PGACTIVATE));
	seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGDEACTIVATE),
		       memcg_events(memcg, PGDEACTIVATE));
	seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGLAZYFREE),
		       memcg_events(memcg, PGLAZYFREE));
	seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGLAZYFREED),
		       memcg_events(memcg, PGLAZYFREED));

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	seq_buf_printf(&s, "%s %lu\n", vm_event_name(THP_FAULT_ALLOC),
		       memcg_events(memcg, THP_FAULT_ALLOC));
	seq_buf_printf(&s, "%s %lu\n", vm_event_name(THP_COLLAPSE_ALLOC),
		       memcg_events(memcg, THP_COLLAPSE_ALLOC));
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

	/* The above should easily fit into one page */
	WARN_ON_ONCE(seq_buf_has_overflowed(&s));

	return s.buffer;
}

#define K(x) ((x) << (PAGE_SHIFT-10))
/**
 * mem_cgroup_print_oom_context: Print OOM information relevant to
 * memory controller.
 * @memcg: The memory cgroup that went over limit
 * @p: Task that is going to be killed
 *
 * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
 * enabled
 */
void mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
{
	rcu_read_lock();

	if (memcg) {
		pr_cont(",oom_memcg=");
		pr_cont_cgroup_path(memcg->css.cgroup);
	} else
		pr_cont(",global_oom");
	if (p) {
		pr_cont(",task_memcg=");
		pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
	}
	rcu_read_unlock();
}

/**
 * mem_cgroup_print_oom_meminfo: Print OOM memory information relevant to
 * memory controller.
 * @memcg: The memory cgroup that went over limit
 */
void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
{
	char *buf;

	pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n",
		K((u64)page_counter_read(&memcg->memory)),
		K((u64)READ_ONCE(memcg->memory.max)), memcg->memory.failcnt);
	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
		pr_info("swap: usage %llukB, limit %llukB, failcnt %lu\n",
			K((u64)page_counter_read(&memcg->swap)),
			K((u64)READ_ONCE(memcg->swap.max)), memcg->swap.failcnt);
	else {
		pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %lu\n",
			K((u64)page_counter_read(&memcg->memsw)),
			K((u64)memcg->memsw.max), memcg->memsw.failcnt);
		pr_info("kmem: usage %llukB, limit %llukB, failcnt %lu\n",
			K((u64)page_counter_read(&memcg->kmem)),
			K((u64)memcg->kmem.max), memcg->kmem.failcnt);
	}

	pr_info("Memory cgroup stats for ");
	pr_cont_cgroup_path(memcg->css.cgroup);
	pr_cont(":");
	buf = memory_stat_format(memcg);
	if (!buf)
		return;
	pr_info("%s", buf);
	kfree(buf);
}

/*
 * Return the memory (and swap, if configured) limit for a memcg.
 */
unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
{
	unsigned long max = READ_ONCE(memcg->memory.max);

	if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
		if (mem_cgroup_swappiness(memcg))
			max += min(READ_ONCE(memcg->swap.max),
				   (unsigned long)total_swap_pages);
	} else { /* v1 */
		if (mem_cgroup_swappiness(memcg)) {
			/* Calculate swap excess capacity from memsw limit */
			unsigned long swap = READ_ONCE(memcg->memsw.max) - max;

			max += min(swap, (unsigned long)total_swap_pages);
		}
	}
	return max;
}

unsigned long mem_cgroup_size(struct mem_cgroup *memcg)
{
	return page_counter_read(&memcg->memory);
}

static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
				     int order)
{
	struct oom_control oc = {
		.zonelist = NULL,
		.nodemask = NULL,
		.memcg = memcg,
		.gfp_mask = gfp_mask,
		.order = order,
	};
	bool ret = true;

	if (mutex_lock_killable(&oom_lock))
		return true;

	if (mem_cgroup_margin(memcg) >= (1 << order))
		goto unlock;

	/*
	 * A few threads which were not waiting at mutex_lock_killable() can
	 * fail to bail out. Therefore, check again after holding oom_lock.
	 */
	ret = should_force_charge() || out_of_memory(&oc);

unlock:
	mutex_unlock(&oom_lock);
	return ret;
}
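
/*
 * Reclaim from the subtree of @root_memcg, which is over its soft limit,
 * sharing the per-node walk with other reclaimers via a reclaim cookie.
 * Returns the number of pages reclaimed; pages scanned are added to
 * @total_scanned.
 */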
static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
				   pg_data_t *pgdat,
				   gfp_t gfp_mask,
				   unsigned long *total_scanned)
{
	struct mem_cgroup *victim = NULL;
	int total = 0;
	int loop = 0;
	unsigned long excess;
	unsigned long nr_scanned;
	struct mem_cgroup_reclaim_cookie reclaim = {
		.pgdat = pgdat,
	};

	excess = soft_limit_excess(root_memcg);

	while (1) {
		victim = mem_cgroup_iter(root_memcg, victim, &reclaim);
		if (!victim) {
			loop++;
			if (loop >= 2) {
				/*
				 * If we have not been able to reclaim
				 * anything, it might be because there are
				 * no reclaimable pages under this hierarchy.
				 */
				if (!total)
					break;
				/*
				 * We want to do more targeted reclaim.
				 * A target of excess >> 2 is not so large
				 * that we reclaim too much, nor so small
				 * that we keep coming back to reclaim from
				 * this cgroup.
				 */
				if (total >= (excess >> 2) ||
					(loop > MEM_CGROUP_MAX_RECLAIM_LOOPS))
					break;
			}
			continue;
		}
		total += mem_cgroup_shrink_node(victim, gfp_mask, false,
					pgdat, &nr_scanned);
		*total_scanned += nr_scanned;
		if (!soft_limit_excess(root_memcg))
			break;
	}
	mem_cgroup_iter_break(root_memcg, victim);
	return total;
}

#ifdef CONFIG_LOCKDEP
static struct lockdep_map memcg_oom_lock_dep_map = {
	.name = "memcg_oom_lock",
};
#endif

static DEFINE_SPINLOCK(memcg_oom_lock);

/*
 * Check whether the OOM killer is already running under our hierarchy.
 * If someone is running, return false.
 */
static bool mem_cgroup_oom_trylock(struct mem_cgroup *memcg)
{
	struct mem_cgroup *iter, *failed = NULL;

	spin_lock(&memcg_oom_lock);

	for_each_mem_cgroup_tree(iter, memcg) {
		if (iter->oom_lock) {
			/*
			 * this subtree of our hierarchy is already locked
			 * so we cannot take the lock.
			 */
			failed = iter;
			mem_cgroup_iter_break(memcg, iter);
			break;
		} else
			iter->oom_lock = true;
	}

	if (failed) {
		/*
		 * OK, we failed to lock the whole subtree so we have
		 * to clean up what we have set up, up to the failing
		 * subtree.
		 */
		for_each_mem_cgroup_tree(iter, memcg) {
			if (iter == failed) {
				mem_cgroup_iter_break(memcg, iter);
				break;
			}
			iter->oom_lock = false;
		}
	} else
		mutex_acquire(&memcg_oom_lock_dep_map, 0, 1, _RET_IP_);

	spin_unlock(&memcg_oom_lock);

	return !failed;
}

static void mem_cgroup_oom_unlock(struct mem_cgroup *memcg)
{
	struct mem_cgroup *iter;

	spin_lock(&memcg_oom_lock);
	mutex_release(&memcg_oom_lock_dep_map, _RET_IP_);
	for_each_mem_cgroup_tree(iter, memcg)
		iter->oom_lock = false;
	spin_unlock(&memcg_oom_lock);
}

static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg)
{
	struct mem_cgroup *iter;

	spin_lock(&memcg_oom_lock);
	for_each_mem_cgroup_tree(iter, memcg)
		iter->under_oom++;
	spin_unlock(&memcg_oom_lock);
}

static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg)
{
	struct mem_cgroup *iter;

	/*
	 * Be careful about under_oom underflows because a child memcg
	 * could have been added after mem_cgroup_mark_under_oom.
	 */
	spin_lock(&memcg_oom_lock);
	for_each_mem_cgroup_tree(iter, memcg)
		if (iter->under_oom > 0)
			iter->under_oom--;
	spin_unlock(&memcg_oom_lock);
}

static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);

struct oom_wait_info {
	struct mem_cgroup *memcg;
	wait_queue_entry_t	wait;
};

static int memcg_oom_wake_function(wait_queue_entry_t *wait,
	unsigned mode, int sync, void *arg)
{
	struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg;
	struct mem_cgroup *oom_wait_memcg;
	struct oom_wait_info *oom_wait_info;

	oom_wait_info = container_of(wait, struct oom_wait_info, wait);
	oom_wait_memcg = oom_wait_info->memcg;

	if (!mem_cgroup_is_descendant(wake_memcg, oom_wait_memcg) &&
	    !mem_cgroup_is_descendant(oom_wait_memcg, wake_memcg))
		return 0;
	return autoremove_wake_function(wait, mode, sync, arg);
}

static void memcg_oom_recover(struct mem_cgroup *memcg)
{
	/*
	 * For the following lockless ->under_oom test, the only required
	 * guarantee is that it must see the state asserted by an OOM when
	 * this function is called as a result of userland actions
	 * triggered by the notification of the OOM. This is trivially
	 * achieved by invoking mem_cgroup_mark_under_oom() before
	 * triggering notification.
	 */
	if (memcg && memcg->under_oom)
		__wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg);
}

enum oom_status {
	OOM_SUCCESS,
	OOM_FAILED,
	OOM_ASYNC,
	OOM_SKIPPED
};

static enum oom_status mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
{
	enum oom_status ret;
	bool locked;

	if (order > PAGE_ALLOC_COSTLY_ORDER)
		return OOM_SKIPPED;

	memcg_memory_event(memcg, MEMCG_OOM);

	/*
	 * We are in the middle of the charge context here, so we
	 * don't want to block when potentially sitting on a callstack
	 * that holds all kinds of filesystem and mm locks.
	 *
	 * cgroup1 allows disabling the OOM killer and waiting for outside
	 * handling until the charge can succeed; remember the context and put
	 * the task to sleep at the end of the page fault when all locks are
	 * released.
	 *
	 * On the other hand, in-kernel OOM killer allows for an async victim
	 * memory reclaim (oom_reaper) and that means that we are not solely
	 * relying on the oom victim to make forward progress and we can
	 * invoke the oom killer here.
	 *
	 * Please note that mem_cgroup_out_of_memory might fail to find a
	 * victim and then we have to bail out from the charge path.
	 */
1942 */ 1943 if (memcg->oom_kill_disable) { 1944 if (!current->in_user_fault) 1945 return OOM_SKIPPED; 1946 css_get(&memcg->css); 1947 current->memcg_in_oom = memcg; 1948 current->memcg_oom_gfp_mask = mask; 1949 current->memcg_oom_order = order; 1950 1951 return OOM_ASYNC; 1952 } 1953 1954 mem_cgroup_mark_under_oom(memcg); 1955 1956 locked = mem_cgroup_oom_trylock(memcg); 1957 1958 if (locked) 1959 mem_cgroup_oom_notify(memcg); 1960 1961 mem_cgroup_unmark_under_oom(memcg); 1962 if (mem_cgroup_out_of_memory(memcg, mask, order)) 1963 ret = OOM_SUCCESS; 1964 else 1965 ret = OOM_FAILED; 1966 1967 if (locked) 1968 mem_cgroup_oom_unlock(memcg); 1969 1970 return ret; 1971 } 1972 1973 /** 1974 * mem_cgroup_oom_synchronize - complete memcg OOM handling 1975 * @handle: actually kill/wait or just clean up the OOM state 1976 * 1977 * This has to be called at the end of a page fault if the memcg OOM 1978 * handler was enabled. 1979 * 1980 * Memcg supports userspace OOM handling where failed allocations must 1981 * sleep on a waitqueue until the userspace task resolves the 1982 * situation. Sleeping directly in the charge context with all kinds 1983 * of locks held is not a good idea, instead we remember an OOM state 1984 * in the task and mem_cgroup_oom_synchronize() has to be called at 1985 * the end of the page fault to complete the OOM handling. 1986 * 1987 * Returns %true if an ongoing memcg OOM situation was detected and 1988 * completed, %false otherwise. 1989 */ 1990 bool mem_cgroup_oom_synchronize(bool handle) 1991 { 1992 struct mem_cgroup *memcg = current->memcg_in_oom; 1993 struct oom_wait_info owait; 1994 bool locked; 1995 1996 /* OOM is global, do not handle */ 1997 if (!memcg) 1998 return false; 1999 2000 if (!handle) 2001 goto cleanup; 2002 2003 owait.memcg = memcg; 2004 owait.wait.flags = 0; 2005 owait.wait.func = memcg_oom_wake_function; 2006 owait.wait.private = current; 2007 INIT_LIST_HEAD(&owait.wait.entry); 2008 2009 prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE); 2010 mem_cgroup_mark_under_oom(memcg); 2011 2012 locked = mem_cgroup_oom_trylock(memcg); 2013 2014 if (locked) 2015 mem_cgroup_oom_notify(memcg); 2016 2017 if (locked && !memcg->oom_kill_disable) { 2018 mem_cgroup_unmark_under_oom(memcg); 2019 finish_wait(&memcg_oom_waitq, &owait.wait); 2020 mem_cgroup_out_of_memory(memcg, current->memcg_oom_gfp_mask, 2021 current->memcg_oom_order); 2022 } else { 2023 schedule(); 2024 mem_cgroup_unmark_under_oom(memcg); 2025 finish_wait(&memcg_oom_waitq, &owait.wait); 2026 } 2027 2028 if (locked) { 2029 mem_cgroup_oom_unlock(memcg); 2030 /* 2031 * There is no guarantee that an OOM-lock contender 2032 * sees the wakeups triggered by the OOM kill 2033 * uncharges. Wake any sleepers explicitely. 2034 */ 2035 memcg_oom_recover(memcg); 2036 } 2037 cleanup: 2038 current->memcg_in_oom = NULL; 2039 css_put(&memcg->css); 2040 return true; 2041 } 2042 2043 /** 2044 * mem_cgroup_get_oom_group - get a memory cgroup to clean up after OOM 2045 * @victim: task to be killed by the OOM killer 2046 * @oom_domain: memcg in case of memcg OOM, NULL in case of system-wide OOM 2047 * 2048 * Returns a pointer to a memory cgroup, which has to be cleaned up 2049 * by killing all belonging OOM-killable tasks. 2050 * 2051 * Caller has to call mem_cgroup_put() on the returned non-NULL memcg. 
2052 */ 2053 struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim, 2054 struct mem_cgroup *oom_domain) 2055 { 2056 struct mem_cgroup *oom_group = NULL; 2057 struct mem_cgroup *memcg; 2058 2059 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) 2060 return NULL; 2061 2062 if (!oom_domain) 2063 oom_domain = root_mem_cgroup; 2064 2065 rcu_read_lock(); 2066 2067 memcg = mem_cgroup_from_task(victim); 2068 if (memcg == root_mem_cgroup) 2069 goto out; 2070 2071 /* 2072 * If the victim task has been asynchronously moved to a different 2073 * memory cgroup, we might end up killing tasks outside oom_domain. 2074 * In this case it's better to ignore memory.group.oom. 2075 */ 2076 if (unlikely(!mem_cgroup_is_descendant(memcg, oom_domain))) 2077 goto out; 2078 2079 /* 2080 * Traverse the memory cgroup hierarchy from the victim task's 2081 * cgroup up to the OOMing cgroup (or root) to find the 2082 * highest-level memory cgroup with oom.group set. 2083 */ 2084 for (; memcg; memcg = parent_mem_cgroup(memcg)) { 2085 if (memcg->oom_group) 2086 oom_group = memcg; 2087 2088 if (memcg == oom_domain) 2089 break; 2090 } 2091 2092 if (oom_group) 2093 css_get(&oom_group->css); 2094 out: 2095 rcu_read_unlock(); 2096 2097 return oom_group; 2098 } 2099 2100 void mem_cgroup_print_oom_group(struct mem_cgroup *memcg) 2101 { 2102 pr_info("Tasks in "); 2103 pr_cont_cgroup_path(memcg->css.cgroup); 2104 pr_cont(" are going to be killed due to memory.oom.group set\n"); 2105 } 2106 2107 /** 2108 * lock_page_memcg - lock a page and memcg binding 2109 * @page: the page 2110 * 2111 * This function protects unlocked LRU pages from being moved to 2112 * another cgroup. 2113 * 2114 * It ensures lifetime of the returned memcg. Caller is responsible 2115 * for the lifetime of the page; __unlock_page_memcg() is available 2116 * when @page might get freed inside the locked section. 2117 */ 2118 struct mem_cgroup *lock_page_memcg(struct page *page) 2119 { 2120 struct page *head = compound_head(page); /* rmap on tail pages */ 2121 struct mem_cgroup *memcg; 2122 unsigned long flags; 2123 2124 /* 2125 * The RCU lock is held throughout the transaction. The fast 2126 * path can get away without acquiring the memcg->move_lock 2127 * because page moving starts with an RCU grace period. 2128 * 2129 * The RCU lock also protects the memcg from being freed when 2130 * the page state that is going to change is the only thing 2131 * preventing the page itself from being freed. E.g. writeback 2132 * doesn't hold a page reference and relies on PG_writeback to 2133 * keep off truncation, migration and so forth. 2134 */ 2135 rcu_read_lock(); 2136 2137 if (mem_cgroup_disabled()) 2138 return NULL; 2139 again: 2140 memcg = page_memcg(head); 2141 if (unlikely(!memcg)) 2142 return NULL; 2143 2144 if (atomic_read(&memcg->moving_account) <= 0) 2145 return memcg; 2146 2147 spin_lock_irqsave(&memcg->move_lock, flags); 2148 if (memcg != page_memcg(head)) { 2149 spin_unlock_irqrestore(&memcg->move_lock, flags); 2150 goto again; 2151 } 2152 2153 /* 2154 * When charge migration first begins, we can have locked and 2155 * unlocked page stat updates happening concurrently. Track 2156 * the task who has the lock for unlock_page_memcg(). 2157 */ 2158 memcg->move_lock_task = current; 2159 memcg->move_lock_flags = flags; 2160 2161 return memcg; 2162 } 2163 EXPORT_SYMBOL(lock_page_memcg); 2164 2165 /** 2166 * __unlock_page_memcg - unlock and unpin a memcg 2167 * @memcg: the memcg 2168 * 2169 * Unlock and unpin a memcg returned by lock_page_memcg(). 
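 *
 * A typical pairing, for illustration:
 *
 *	memcg = lock_page_memcg(page);
 *	... update page state covered by the binding ...
 *	__unlock_page_memcg(memcg);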
2170 */ 2171 void __unlock_page_memcg(struct mem_cgroup *memcg) 2172 { 2173 if (memcg && memcg->move_lock_task == current) { 2174 unsigned long flags = memcg->move_lock_flags; 2175 2176 memcg->move_lock_task = NULL; 2177 memcg->move_lock_flags = 0; 2178 2179 spin_unlock_irqrestore(&memcg->move_lock, flags); 2180 } 2181 2182 rcu_read_unlock(); 2183 } 2184 2185 /** 2186 * unlock_page_memcg - unlock a page and memcg binding 2187 * @page: the page 2188 */ 2189 void unlock_page_memcg(struct page *page) 2190 { 2191 struct page *head = compound_head(page); 2192 2193 __unlock_page_memcg(page_memcg(head)); 2194 } 2195 EXPORT_SYMBOL(unlock_page_memcg); 2196 2197 struct memcg_stock_pcp { 2198 struct mem_cgroup *cached; /* this never be root cgroup */ 2199 unsigned int nr_pages; 2200 2201 #ifdef CONFIG_MEMCG_KMEM 2202 struct obj_cgroup *cached_objcg; 2203 unsigned int nr_bytes; 2204 #endif 2205 2206 struct work_struct work; 2207 unsigned long flags; 2208 #define FLUSHING_CACHED_CHARGE 0 2209 }; 2210 static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock); 2211 static DEFINE_MUTEX(percpu_charge_mutex); 2212 2213 #ifdef CONFIG_MEMCG_KMEM 2214 static void drain_obj_stock(struct memcg_stock_pcp *stock); 2215 static bool obj_stock_flush_required(struct memcg_stock_pcp *stock, 2216 struct mem_cgroup *root_memcg); 2217 2218 #else 2219 static inline void drain_obj_stock(struct memcg_stock_pcp *stock) 2220 { 2221 } 2222 static bool obj_stock_flush_required(struct memcg_stock_pcp *stock, 2223 struct mem_cgroup *root_memcg) 2224 { 2225 return false; 2226 } 2227 #endif 2228 2229 /** 2230 * consume_stock: Try to consume stocked charge on this cpu. 2231 * @memcg: memcg to consume from. 2232 * @nr_pages: how many pages to charge. 2233 * 2234 * The charges will only happen if @memcg matches the current cpu's memcg 2235 * stock, and at least @nr_pages are available in that stock. Failure to 2236 * service an allocation will refill the stock. 2237 * 2238 * returns true if successful, false otherwise. 2239 */ 2240 static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages) 2241 { 2242 struct memcg_stock_pcp *stock; 2243 unsigned long flags; 2244 bool ret = false; 2245 2246 if (nr_pages > MEMCG_CHARGE_BATCH) 2247 return ret; 2248 2249 local_irq_save(flags); 2250 2251 stock = this_cpu_ptr(&memcg_stock); 2252 if (memcg == stock->cached && stock->nr_pages >= nr_pages) { 2253 stock->nr_pages -= nr_pages; 2254 ret = true; 2255 } 2256 2257 local_irq_restore(flags); 2258 2259 return ret; 2260 } 2261 2262 /* 2263 * Returns stocks cached in percpu and reset cached information. 2264 */ 2265 static void drain_stock(struct memcg_stock_pcp *stock) 2266 { 2267 struct mem_cgroup *old = stock->cached; 2268 2269 if (!old) 2270 return; 2271 2272 if (stock->nr_pages) { 2273 page_counter_uncharge(&old->memory, stock->nr_pages); 2274 if (do_memsw_account()) 2275 page_counter_uncharge(&old->memsw, stock->nr_pages); 2276 stock->nr_pages = 0; 2277 } 2278 2279 css_put(&old->css); 2280 stock->cached = NULL; 2281 } 2282 2283 static void drain_local_stock(struct work_struct *dummy) 2284 { 2285 struct memcg_stock_pcp *stock; 2286 unsigned long flags; 2287 2288 /* 2289 * The only protection from memory hotplug vs. 
drain_stock races is 2290 * that we always operate on local CPU stock here with IRQ disabled 2291 */ 2292 local_irq_save(flags); 2293 2294 stock = this_cpu_ptr(&memcg_stock); 2295 drain_obj_stock(stock); 2296 drain_stock(stock); 2297 clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags); 2298 2299 local_irq_restore(flags); 2300 } 2301 2302 /* 2303 * Cache charges(val) to local per_cpu area. 2304 * This will be consumed by consume_stock() function, later. 2305 */ 2306 static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages) 2307 { 2308 struct memcg_stock_pcp *stock; 2309 unsigned long flags; 2310 2311 local_irq_save(flags); 2312 2313 stock = this_cpu_ptr(&memcg_stock); 2314 if (stock->cached != memcg) { /* reset if necessary */ 2315 drain_stock(stock); 2316 css_get(&memcg->css); 2317 stock->cached = memcg; 2318 } 2319 stock->nr_pages += nr_pages; 2320 2321 if (stock->nr_pages > MEMCG_CHARGE_BATCH) 2322 drain_stock(stock); 2323 2324 local_irq_restore(flags); 2325 } 2326 2327 /* 2328 * Drains all per-CPU charge caches for given root_memcg resp. subtree 2329 * of the hierarchy under it. 2330 */ 2331 static void drain_all_stock(struct mem_cgroup *root_memcg) 2332 { 2333 int cpu, curcpu; 2334 2335 /* If someone's already draining, avoid adding running more workers. */ 2336 if (!mutex_trylock(&percpu_charge_mutex)) 2337 return; 2338 /* 2339 * Notify other cpus that system-wide "drain" is running 2340 * We do not care about races with the cpu hotplug because cpu down 2341 * as well as workers from this path always operate on the local 2342 * per-cpu data. CPU up doesn't touch memcg_stock at all. 2343 */ 2344 curcpu = get_cpu(); 2345 for_each_online_cpu(cpu) { 2346 struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu); 2347 struct mem_cgroup *memcg; 2348 bool flush = false; 2349 2350 rcu_read_lock(); 2351 memcg = stock->cached; 2352 if (memcg && stock->nr_pages && 2353 mem_cgroup_is_descendant(memcg, root_memcg)) 2354 flush = true; 2355 if (obj_stock_flush_required(stock, root_memcg)) 2356 flush = true; 2357 rcu_read_unlock(); 2358 2359 if (flush && 2360 !test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) { 2361 if (cpu == curcpu) 2362 drain_local_stock(&stock->work); 2363 else 2364 schedule_work_on(cpu, &stock->work); 2365 } 2366 } 2367 put_cpu(); 2368 mutex_unlock(&percpu_charge_mutex); 2369 } 2370 2371 static int memcg_hotplug_cpu_dead(unsigned int cpu) 2372 { 2373 struct memcg_stock_pcp *stock; 2374 struct mem_cgroup *memcg, *mi; 2375 2376 stock = &per_cpu(memcg_stock, cpu); 2377 drain_stock(stock); 2378 2379 for_each_mem_cgroup(memcg) { 2380 int i; 2381 2382 for (i = 0; i < MEMCG_NR_STAT; i++) { 2383 int nid; 2384 long x; 2385 2386 x = this_cpu_xchg(memcg->vmstats_percpu->stat[i], 0); 2387 if (x) 2388 for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) 2389 atomic_long_add(x, &memcg->vmstats[i]); 2390 2391 if (i >= NR_VM_NODE_STAT_ITEMS) 2392 continue; 2393 2394 for_each_node(nid) { 2395 struct mem_cgroup_per_node *pn; 2396 2397 pn = mem_cgroup_nodeinfo(memcg, nid); 2398 x = this_cpu_xchg(pn->lruvec_stat_cpu->count[i], 0); 2399 if (x) 2400 do { 2401 atomic_long_add(x, &pn->lruvec_stat[i]); 2402 } while ((pn = parent_nodeinfo(pn, nid))); 2403 } 2404 } 2405 2406 for (i = 0; i < NR_VM_EVENT_ITEMS; i++) { 2407 long x; 2408 2409 x = this_cpu_xchg(memcg->vmstats_percpu->events[i], 0); 2410 if (x) 2411 for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) 2412 atomic_long_add(x, &memcg->vmevents[i]); 2413 } 2414 } 2415 2416 return 0; 2417 } 2418 2419 static unsigned long 
reclaim_high(struct mem_cgroup *memcg, 2420 unsigned int nr_pages, 2421 gfp_t gfp_mask) 2422 { 2423 unsigned long nr_reclaimed = 0; 2424 2425 do { 2426 unsigned long pflags; 2427 2428 if (page_counter_read(&memcg->memory) <= 2429 READ_ONCE(memcg->memory.high)) 2430 continue; 2431 2432 memcg_memory_event(memcg, MEMCG_HIGH); 2433 2434 psi_memstall_enter(&pflags); 2435 nr_reclaimed += try_to_free_mem_cgroup_pages(memcg, nr_pages, 2436 gfp_mask, true); 2437 psi_memstall_leave(&pflags); 2438 } while ((memcg = parent_mem_cgroup(memcg)) && 2439 !mem_cgroup_is_root(memcg)); 2440 2441 return nr_reclaimed; 2442 } 2443 2444 static void high_work_func(struct work_struct *work) 2445 { 2446 struct mem_cgroup *memcg; 2447 2448 memcg = container_of(work, struct mem_cgroup, high_work); 2449 reclaim_high(memcg, MEMCG_CHARGE_BATCH, GFP_KERNEL); 2450 } 2451 2452 /* 2453 * Clamp the maximum sleep time per allocation batch to 2 seconds. This is 2454 * long enough to cause a significant slowdown in most cases, while still 2455 * allowing diagnostics and tracing to proceed without becoming stuck. 2456 */ 2457 #define MEMCG_MAX_HIGH_DELAY_JIFFIES (2UL*HZ) 2458 2459 /* 2460 * When calculating the delay, we use these on either side of the exponentiation to 2461 * maintain precision and scale to a reasonable number of jiffies (see the table 2462 * below). 2463 * 2464 * - MEMCG_DELAY_PRECISION_SHIFT: Extra precision bits while translating the 2465 * overage ratio to a delay. 2466 * - MEMCG_DELAY_SCALING_SHIFT: The number of bits to scale down the 2467 * proposed penalty in order to reduce to a reasonable number of jiffies, and 2468 * to produce a reasonable delay curve. 2469 * 2470 * MEMCG_DELAY_SCALING_SHIFT just happens to be a number that produces a 2471 * reasonable delay curve compared to precision-adjusted overage, not 2472 * penalising heavily at first, but still making sure that growth beyond the 2473 * limit penalises misbehaving cgroups by slowing them down exponentially.
For 2474 * example, with a high of 100 megabytes: 2475 * 2476 * +-------+------------------------+ 2477 * | usage | time to allocate in ms | 2478 * +-------+------------------------+ 2479 * | 100M | 0 | 2480 * | 101M | 6 | 2481 * | 102M | 25 | 2482 * | 103M | 57 | 2483 * | 104M | 102 | 2484 * | 105M | 159 | 2485 * | 106M | 230 | 2486 * | 107M | 313 | 2487 * | 108M | 409 | 2488 * | 109M | 518 | 2489 * | 110M | 639 | 2490 * | 111M | 774 | 2491 * | 112M | 921 | 2492 * | 113M | 1081 | 2493 * | 114M | 1254 | 2494 * | 115M | 1439 | 2495 * | 116M | 1638 | 2496 * | 117M | 1849 | 2497 * | 118M | 2000 | 2498 * | 119M | 2000 | 2499 * | 120M | 2000 | 2500 * +-------+------------------------+ 2501 */ 2502 #define MEMCG_DELAY_PRECISION_SHIFT 20 2503 #define MEMCG_DELAY_SCALING_SHIFT 14 2504 2505 static u64 calculate_overage(unsigned long usage, unsigned long high) 2506 { 2507 u64 overage; 2508 2509 if (usage <= high) 2510 return 0; 2511 2512 /* 2513 * Prevent division by 0 in overage calculation by acting as if 2514 * it was a threshold of 1 page 2515 */ 2516 high = max(high, 1UL); 2517 2518 overage = usage - high; 2519 overage <<= MEMCG_DELAY_PRECISION_SHIFT; 2520 return div64_u64(overage, high); 2521 } 2522 2523 static u64 mem_find_max_overage(struct mem_cgroup *memcg) 2524 { 2525 u64 overage, max_overage = 0; 2526 2527 do { 2528 overage = calculate_overage(page_counter_read(&memcg->memory), 2529 READ_ONCE(memcg->memory.high)); 2530 max_overage = max(overage, max_overage); 2531 } while ((memcg = parent_mem_cgroup(memcg)) && 2532 !mem_cgroup_is_root(memcg)); 2533 2534 return max_overage; 2535 } 2536 2537 static u64 swap_find_max_overage(struct mem_cgroup *memcg) 2538 { 2539 u64 overage, max_overage = 0; 2540 2541 do { 2542 overage = calculate_overage(page_counter_read(&memcg->swap), 2543 READ_ONCE(memcg->swap.high)); 2544 if (overage) 2545 memcg_memory_event(memcg, MEMCG_SWAP_HIGH); 2546 max_overage = max(overage, max_overage); 2547 } while ((memcg = parent_mem_cgroup(memcg)) && 2548 !mem_cgroup_is_root(memcg)); 2549 2550 return max_overage; 2551 } 2552 2553 /* 2554 * Get the number of jiffies that we should penalise a mischievous cgroup which 2555 * is exceeding its memory.high by checking both it and its ancestors. 2556 */ 2557 static unsigned long calculate_high_delay(struct mem_cgroup *memcg, 2558 unsigned int nr_pages, 2559 u64 max_overage) 2560 { 2561 unsigned long penalty_jiffies; 2562 2563 if (!max_overage) 2564 return 0; 2565 2566 /* 2567 * We use overage compared to memory.high to calculate the number of 2568 * jiffies to sleep (penalty_jiffies). Ideally this value should be 2569 * fairly lenient on small overages, and increasingly harsh when the 2570 * memcg in question makes it clear that it has no intention of stopping 2571 * its crazy behaviour, so we exponentially increase the delay based on 2572 * overage amount. 2573 */ 2574 penalty_jiffies = max_overage * max_overage * HZ; 2575 penalty_jiffies >>= MEMCG_DELAY_PRECISION_SHIFT; 2576 penalty_jiffies >>= MEMCG_DELAY_SCALING_SHIFT; 2577 2578 /* 2579 * Factor in the task's own contribution to the overage, such that four 2580 * N-sized allocations are throttled approximately the same as one 2581 * 4N-sized allocation. 2582 * 2583 * MEMCG_CHARGE_BATCH pages is nominal, so work out how much smaller or 2584 * larger the current charge batch is than that.
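 *
 * For illustration (assuming HZ == 1000 and a MEMCG_CHARGE_BATCH-sized
 * charge): with memory.high at 100M and usage at 101M, the overage ratio
 * is (1M << MEMCG_DELAY_PRECISION_SHIFT) / 100M ~= 10485, so
 * penalty_jiffies ~= 10485 * 10485 * 1000 >> 20 >> 14 ~= 6 jiffies,
 * i.e. the ~6ms shown in the table above.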
2585 */ 2586 return penalty_jiffies * nr_pages / MEMCG_CHARGE_BATCH; 2587 } 2588 2589 /* 2590 * Scheduled by try_charge() to be executed from the userland return path 2591 * and reclaims memory over the high limit. 2592 */ 2593 void mem_cgroup_handle_over_high(void) 2594 { 2595 unsigned long penalty_jiffies; 2596 unsigned long pflags; 2597 unsigned long nr_reclaimed; 2598 unsigned int nr_pages = current->memcg_nr_pages_over_high; 2599 int nr_retries = MAX_RECLAIM_RETRIES; 2600 struct mem_cgroup *memcg; 2601 bool in_retry = false; 2602 2603 if (likely(!nr_pages)) 2604 return; 2605 2606 memcg = get_mem_cgroup_from_mm(current->mm); 2607 current->memcg_nr_pages_over_high = 0; 2608 2609 retry_reclaim: 2610 /* 2611 * The allocating task should reclaim at least the batch size, but for 2612 * subsequent retries we only want to do what's necessary to prevent oom 2613 * or breaching resource isolation. 2614 * 2615 * This is distinct from memory.max or page allocator behaviour because 2616 * memory.high is currently batched, whereas memory.max and the page 2617 * allocator run every time an allocation is made. 2618 */ 2619 nr_reclaimed = reclaim_high(memcg, 2620 in_retry ? SWAP_CLUSTER_MAX : nr_pages, 2621 GFP_KERNEL); 2622 2623 /* 2624 * memory.high is breached and reclaim is unable to keep up. Throttle 2625 * allocators proactively to slow down excessive growth. 2626 */ 2627 penalty_jiffies = calculate_high_delay(memcg, nr_pages, 2628 mem_find_max_overage(memcg)); 2629 2630 penalty_jiffies += calculate_high_delay(memcg, nr_pages, 2631 swap_find_max_overage(memcg)); 2632 2633 /* 2634 * Clamp the max delay per usermode return so as to still keep the 2635 * application moving forwards and also permit diagnostics, albeit 2636 * extremely slowly. 2637 */ 2638 penalty_jiffies = min(penalty_jiffies, MEMCG_MAX_HIGH_DELAY_JIFFIES); 2639 2640 /* 2641 * Don't sleep if the amount of jiffies this memcg owes us is so low 2642 * that it's not even worth doing, in an attempt to be nice to those who 2643 * go only a small amount over their memory.high value and maybe haven't 2644 * been aggressively reclaimed enough yet. 2645 */ 2646 if (penalty_jiffies <= HZ / 100) 2647 goto out; 2648 2649 /* 2650 * If reclaim is making forward progress but we're still over 2651 * memory.high, we want to encourage that rather than doing allocator 2652 * throttling. 2653 */ 2654 if (nr_reclaimed || nr_retries--) { 2655 in_retry = true; 2656 goto retry_reclaim; 2657 } 2658 2659 /* 2660 * If we exit early, we're guaranteed to die (since 2661 * schedule_timeout_killable sets TASK_KILLABLE). This means we don't 2662 * need to account for any ill-begotten jiffies to pay them off later. 
2663 */ 2664 psi_memstall_enter(&pflags); 2665 schedule_timeout_killable(penalty_jiffies); 2666 psi_memstall_leave(&pflags); 2667 2668 out: 2669 css_put(&memcg->css); 2670 } 2671 2672 static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask, 2673 unsigned int nr_pages) 2674 { 2675 unsigned int batch = max(MEMCG_CHARGE_BATCH, nr_pages); 2676 int nr_retries = MAX_RECLAIM_RETRIES; 2677 struct mem_cgroup *mem_over_limit; 2678 struct page_counter *counter; 2679 enum oom_status oom_status; 2680 unsigned long nr_reclaimed; 2681 bool may_swap = true; 2682 bool drained = false; 2683 unsigned long pflags; 2684 2685 if (mem_cgroup_is_root(memcg)) 2686 return 0; 2687 retry: 2688 if (consume_stock(memcg, nr_pages)) 2689 return 0; 2690 2691 if (!do_memsw_account() || 2692 page_counter_try_charge(&memcg->memsw, batch, &counter)) { 2693 if (page_counter_try_charge(&memcg->memory, batch, &counter)) 2694 goto done_restock; 2695 if (do_memsw_account()) 2696 page_counter_uncharge(&memcg->memsw, batch); 2697 mem_over_limit = mem_cgroup_from_counter(counter, memory); 2698 } else { 2699 mem_over_limit = mem_cgroup_from_counter(counter, memsw); 2700 may_swap = false; 2701 } 2702 2703 if (batch > nr_pages) { 2704 batch = nr_pages; 2705 goto retry; 2706 } 2707 2708 /* 2709 * Memcg doesn't have a dedicated reserve for atomic 2710 * allocations. But like the global atomic pool, we need to 2711 * put the burden of reclaim on regular allocation requests 2712 * and let these go through as privileged allocations. 2713 */ 2714 if (gfp_mask & __GFP_ATOMIC) 2715 goto force; 2716 2717 /* 2718 * Unlike in global OOM situations, memcg is not in a physical 2719 * memory shortage. Allow dying and OOM-killed tasks to 2720 * bypass the last charges so that they can exit quickly and 2721 * free their memory. 2722 */ 2723 if (unlikely(should_force_charge())) 2724 goto force; 2725 2726 /* 2727 * Prevent unbounded recursion when reclaim operations need to 2728 * allocate memory. This might exceed the limits temporarily, 2729 * but we prefer facilitating memory reclaim and getting back 2730 * under the limit over triggering OOM kills in these cases. 2731 */ 2732 if (unlikely(current->flags & PF_MEMALLOC)) 2733 goto force; 2734 2735 if (unlikely(task_in_memcg_oom(current))) 2736 goto nomem; 2737 2738 if (!gfpflags_allow_blocking(gfp_mask)) 2739 goto nomem; 2740 2741 memcg_memory_event(mem_over_limit, MEMCG_MAX); 2742 2743 psi_memstall_enter(&pflags); 2744 nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages, 2745 gfp_mask, may_swap); 2746 psi_memstall_leave(&pflags); 2747 2748 if (mem_cgroup_margin(mem_over_limit) >= nr_pages) 2749 goto retry; 2750 2751 if (!drained) { 2752 drain_all_stock(mem_over_limit); 2753 drained = true; 2754 goto retry; 2755 } 2756 2757 if (gfp_mask & __GFP_NORETRY) 2758 goto nomem; 2759 /* 2760 * Even though the limit is exceeded at this point, reclaim 2761 * may have been able to free some pages. Retry the charge 2762 * before killing the task. 2763 * 2764 * Only for regular pages, though: huge pages are rather 2765 * unlikely to succeed so close to the limit, and we fall back 2766 * to regular pages anyway in case of failure. 2767 */ 2768 if (nr_reclaimed && nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER)) 2769 goto retry; 2770 /* 2771 * At task move, charge accounts can be doubly counted. So, it's 2772 * better to wait until the end of task_move if something is going on. 
2773 */ 2774 if (mem_cgroup_wait_acct_move(mem_over_limit)) 2775 goto retry; 2776 2777 if (nr_retries--) 2778 goto retry; 2779 2780 if (gfp_mask & __GFP_RETRY_MAYFAIL) 2781 goto nomem; 2782 2783 if (gfp_mask & __GFP_NOFAIL) 2784 goto force; 2785 2786 if (fatal_signal_pending(current)) 2787 goto force; 2788 2789 /* 2790 * Keep retrying as long as the memcg OOM killer is able to make 2791 * forward progress, or bypass the charge if the OOM killer 2792 * couldn't make any progress. 2793 */ 2794 oom_status = mem_cgroup_oom(mem_over_limit, gfp_mask, 2795 get_order(nr_pages * PAGE_SIZE)); 2796 switch (oom_status) { 2797 case OOM_SUCCESS: 2798 nr_retries = MAX_RECLAIM_RETRIES; 2799 goto retry; 2800 case OOM_FAILED: 2801 goto force; 2802 default: 2803 goto nomem; 2804 } 2805 nomem: 2806 if (!(gfp_mask & __GFP_NOFAIL)) 2807 return -ENOMEM; 2808 force: 2809 /* 2810 * The allocation either can't fail or will lead to more memory 2811 * being freed very soon. Allow memory usage to go over the limit 2812 * temporarily by force charging it. 2813 */ 2814 page_counter_charge(&memcg->memory, nr_pages); 2815 if (do_memsw_account()) 2816 page_counter_charge(&memcg->memsw, nr_pages); 2817 2818 return 0; 2819 2820 done_restock: 2821 if (batch > nr_pages) 2822 refill_stock(memcg, batch - nr_pages); 2823 2824 /* 2825 * If the hierarchy is above the normal consumption range, schedule 2826 * reclaim on returning to userland. We can perform reclaim here 2827 * if __GFP_RECLAIM but let's always punt for simplicity and so that 2828 * GFP_KERNEL can consistently be used during reclaim. @memcg is 2829 * not recorded as it most likely matches current's and won't 2830 * change in the meantime. As the high limit is checked again before 2831 * reclaim, the cost of mismatch is negligible. 2832 */ 2833 do { 2834 bool mem_high, swap_high; 2835 2836 mem_high = page_counter_read(&memcg->memory) > 2837 READ_ONCE(memcg->memory.high); 2838 swap_high = page_counter_read(&memcg->swap) > 2839 READ_ONCE(memcg->swap.high); 2840 2841 /* Don't bother a random interrupted task */ 2842 if (in_interrupt()) { 2843 if (mem_high) { 2844 schedule_work(&memcg->high_work); 2845 break; 2846 } 2847 continue; 2848 } 2849 2850 if (mem_high || swap_high) { 2851 /* 2852 * The allocating tasks in this cgroup will need to do 2853 * reclaim or be throttled to prevent further growth 2854 * of the memory or swap footprints. 2855 * 2856 * Target some best-effort fairness between the tasks, 2857 * and distribute reclaim work and delay penalties 2858 * based on how much each task is actually allocating.
2859 */ 2860 current->memcg_nr_pages_over_high += batch; 2861 set_notify_resume(current); 2862 break; 2863 } 2864 } while ((memcg = parent_mem_cgroup(memcg))); 2865 2866 return 0; 2867 } 2868 2869 #if defined(CONFIG_MEMCG_KMEM) || defined(CONFIG_MMU) 2870 static void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages) 2871 { 2872 if (mem_cgroup_is_root(memcg)) 2873 return; 2874 2875 page_counter_uncharge(&memcg->memory, nr_pages); 2876 if (do_memsw_account()) 2877 page_counter_uncharge(&memcg->memsw, nr_pages); 2878 } 2879 #endif 2880 2881 static void commit_charge(struct page *page, struct mem_cgroup *memcg) 2882 { 2883 VM_BUG_ON_PAGE(page_memcg(page), page); 2884 /* 2885 * Any of the following ensures page->mem_cgroup stability: 2886 * 2887 * - the page lock 2888 * - LRU isolation 2889 * - lock_page_memcg() 2890 * - exclusive reference 2891 */ 2892 page->memcg_data = (unsigned long)memcg; 2893 } 2894 2895 #ifdef CONFIG_MEMCG_KMEM 2896 int memcg_alloc_page_obj_cgroups(struct page *page, struct kmem_cache *s, 2897 gfp_t gfp) 2898 { 2899 unsigned int objects = objs_per_slab_page(s, page); 2900 void *vec; 2901 2902 vec = kcalloc_node(objects, sizeof(struct obj_cgroup *), gfp, 2903 page_to_nid(page)); 2904 if (!vec) 2905 return -ENOMEM; 2906 2907 if (!set_page_objcgs(page, vec)) 2908 kfree(vec); 2909 else 2910 kmemleak_not_leak(vec); 2911 2912 return 0; 2913 } 2914 2915 /* 2916 * Returns a pointer to the memory cgroup to which the kernel object is charged. 2917 * 2918 * A passed kernel object can be a slab object or a generic kernel page, so 2919 * different mechanisms for getting the memory cgroup pointer should be used. 2920 * In certain cases (e.g. kernel stacks or large kmallocs with SLUB) the caller 2921 * can not know for sure how the kernel object is implemented. 2922 * mem_cgroup_from_obj() can be safely used in such cases. 2923 * 2924 * The caller must ensure the memcg lifetime, e.g. by taking rcu_read_lock(), 2925 * cgroup_mutex, etc. 2926 */ 2927 struct mem_cgroup *mem_cgroup_from_obj(void *p) 2928 { 2929 struct page *page; 2930 2931 if (mem_cgroup_disabled()) 2932 return NULL; 2933 2934 page = virt_to_head_page(p); 2935 2936 /* 2937 * Slab objects are accounted individually, not per-page. 2938 * Memcg membership data for each individual object is saved in 2939 * the page->obj_cgroups. 2940 */ 2941 if (page_objcgs_check(page)) { 2942 struct obj_cgroup *objcg; 2943 unsigned int off; 2944 2945 off = obj_to_index(page->slab_cache, page, p); 2946 objcg = page_objcgs(page)[off]; 2947 if (objcg) 2948 return obj_cgroup_memcg(objcg); 2949 2950 return NULL; 2951 } 2952 2953 /* 2954 * page_memcg_check() is used here, because page_has_obj_cgroups() 2955 * check above could fail because the object cgroups vector wasn't set 2956 * at that moment, but it can be set concurrently. 2957 * page_memcg_check(page) will guarantee that a proper memory 2958 * cgroup pointer or NULL will be returned. 
2959 */ 2960 return page_memcg_check(page); 2961 } 2962 2963 __always_inline struct obj_cgroup *get_obj_cgroup_from_current(void) 2964 { 2965 struct obj_cgroup *objcg = NULL; 2966 struct mem_cgroup *memcg; 2967 2968 if (memcg_kmem_bypass()) 2969 return NULL; 2970 2971 rcu_read_lock(); 2972 if (unlikely(active_memcg())) 2973 memcg = active_memcg(); 2974 else 2975 memcg = mem_cgroup_from_task(current); 2976 2977 for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg)) { 2978 objcg = rcu_dereference(memcg->objcg); 2979 if (objcg && obj_cgroup_tryget(objcg)) 2980 break; 2981 } 2982 rcu_read_unlock(); 2983 2984 return objcg; 2985 } 2986 2987 static int memcg_alloc_cache_id(void) 2988 { 2989 int id, size; 2990 int err; 2991 2992 id = ida_simple_get(&memcg_cache_ida, 2993 0, MEMCG_CACHES_MAX_SIZE, GFP_KERNEL); 2994 if (id < 0) 2995 return id; 2996 2997 if (id < memcg_nr_cache_ids) 2998 return id; 2999 3000 /* 3001 * There's no space for the new id in memcg_caches arrays, 3002 * so we have to grow them. 3003 */ 3004 down_write(&memcg_cache_ids_sem); 3005 3006 size = 2 * (id + 1); 3007 if (size < MEMCG_CACHES_MIN_SIZE) 3008 size = MEMCG_CACHES_MIN_SIZE; 3009 else if (size > MEMCG_CACHES_MAX_SIZE) 3010 size = MEMCG_CACHES_MAX_SIZE; 3011 3012 err = memcg_update_all_list_lrus(size); 3013 if (!err) 3014 memcg_nr_cache_ids = size; 3015 3016 up_write(&memcg_cache_ids_sem); 3017 3018 if (err) { 3019 ida_simple_remove(&memcg_cache_ida, id); 3020 return err; 3021 } 3022 return id; 3023 } 3024 3025 static void memcg_free_cache_id(int id) 3026 { 3027 ida_simple_remove(&memcg_cache_ida, id); 3028 } 3029 3030 /** 3031 * __memcg_kmem_charge: charge a number of kernel pages to a memcg 3032 * @memcg: memory cgroup to charge 3033 * @gfp: reclaim mode 3034 * @nr_pages: number of pages to charge 3035 * 3036 * Returns 0 on success, an error code on failure. 3037 */ 3038 int __memcg_kmem_charge(struct mem_cgroup *memcg, gfp_t gfp, 3039 unsigned int nr_pages) 3040 { 3041 struct page_counter *counter; 3042 int ret; 3043 3044 ret = try_charge(memcg, gfp, nr_pages); 3045 if (ret) 3046 return ret; 3047 3048 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && 3049 !page_counter_try_charge(&memcg->kmem, nr_pages, &counter)) { 3050 3051 /* 3052 * Enforce __GFP_NOFAIL allocation because callers are not 3053 * prepared to see failures and likely do not have any failure 3054 * handling code. 3055 */ 3056 if (gfp & __GFP_NOFAIL) { 3057 page_counter_charge(&memcg->kmem, nr_pages); 3058 return 0; 3059 } 3060 cancel_charge(memcg, nr_pages); 3061 return -ENOMEM; 3062 } 3063 return 0; 3064 } 3065 3066 /** 3067 * __memcg_kmem_uncharge: uncharge a number of kernel pages from a memcg 3068 * @memcg: memcg to uncharge 3069 * @nr_pages: number of pages to uncharge 3070 */ 3071 void __memcg_kmem_uncharge(struct mem_cgroup *memcg, unsigned int nr_pages) 3072 { 3073 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) 3074 page_counter_uncharge(&memcg->kmem, nr_pages); 3075 3076 page_counter_uncharge(&memcg->memory, nr_pages); 3077 if (do_memsw_account()) 3078 page_counter_uncharge(&memcg->memsw, nr_pages); 3079 } 3080 3081 /** 3082 * __memcg_kmem_charge_page: charge a kmem page to the current memory cgroup 3083 * @page: page to charge 3084 * @gfp: reclaim mode 3085 * @order: allocation order 3086 * 3087 * Returns 0 on success, an error code on failure. 
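 *
 * (Typically reached from the page allocator for __GFP_ACCOUNT
 * allocations when kmem accounting is enabled.)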
3088 */ 3089 int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order) 3090 { 3091 struct mem_cgroup *memcg; 3092 int ret = 0; 3093 3094 memcg = get_mem_cgroup_from_current(); 3095 if (memcg && !mem_cgroup_is_root(memcg)) { 3096 ret = __memcg_kmem_charge(memcg, gfp, 1 << order); 3097 if (!ret) { 3098 page->memcg_data = (unsigned long)memcg | 3099 MEMCG_DATA_KMEM; 3100 return 0; 3101 } 3102 css_put(&memcg->css); 3103 } 3104 return ret; 3105 } 3106 3107 /** 3108 * __memcg_kmem_uncharge_page: uncharge a kmem page 3109 * @page: page to uncharge 3110 * @order: allocation order 3111 */ 3112 void __memcg_kmem_uncharge_page(struct page *page, int order) 3113 { 3114 struct mem_cgroup *memcg = page_memcg(page); 3115 unsigned int nr_pages = 1 << order; 3116 3117 if (!memcg) 3118 return; 3119 3120 VM_BUG_ON_PAGE(mem_cgroup_is_root(memcg), page); 3121 __memcg_kmem_uncharge(memcg, nr_pages); 3122 page->memcg_data = 0; 3123 css_put(&memcg->css); 3124 } 3125 3126 static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes) 3127 { 3128 struct memcg_stock_pcp *stock; 3129 unsigned long flags; 3130 bool ret = false; 3131 3132 local_irq_save(flags); 3133 3134 stock = this_cpu_ptr(&memcg_stock); 3135 if (objcg == stock->cached_objcg && stock->nr_bytes >= nr_bytes) { 3136 stock->nr_bytes -= nr_bytes; 3137 ret = true; 3138 } 3139 3140 local_irq_restore(flags); 3141 3142 return ret; 3143 } 3144 3145 static void drain_obj_stock(struct memcg_stock_pcp *stock) 3146 { 3147 struct obj_cgroup *old = stock->cached_objcg; 3148 3149 if (!old) 3150 return; 3151 3152 if (stock->nr_bytes) { 3153 unsigned int nr_pages = stock->nr_bytes >> PAGE_SHIFT; 3154 unsigned int nr_bytes = stock->nr_bytes & (PAGE_SIZE - 1); 3155 3156 if (nr_pages) { 3157 rcu_read_lock(); 3158 __memcg_kmem_uncharge(obj_cgroup_memcg(old), nr_pages); 3159 rcu_read_unlock(); 3160 } 3161 3162 /* 3163 * The leftover is flushed to the centralized per-memcg value. 3164 * On the next attempt to refill obj stock it will be moved 3165 * to a per-cpu stock (probably, on an other CPU), see 3166 * refill_obj_stock(). 3167 * 3168 * How often it's flushed is a trade-off between the memory 3169 * limit enforcement accuracy and potential CPU contention, 3170 * so it might be changed in the future. 
3171 */ 3172 atomic_add(nr_bytes, &old->nr_charged_bytes); 3173 stock->nr_bytes = 0; 3174 } 3175 3176 obj_cgroup_put(old); 3177 stock->cached_objcg = NULL; 3178 } 3179 3180 static bool obj_stock_flush_required(struct memcg_stock_pcp *stock, 3181 struct mem_cgroup *root_memcg) 3182 { 3183 struct mem_cgroup *memcg; 3184 3185 if (stock->cached_objcg) { 3186 memcg = obj_cgroup_memcg(stock->cached_objcg); 3187 if (memcg && mem_cgroup_is_descendant(memcg, root_memcg)) 3188 return true; 3189 } 3190 3191 return false; 3192 } 3193 3194 static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes) 3195 { 3196 struct memcg_stock_pcp *stock; 3197 unsigned long flags; 3198 3199 local_irq_save(flags); 3200 3201 stock = this_cpu_ptr(&memcg_stock); 3202 if (stock->cached_objcg != objcg) { /* reset if necessary */ 3203 drain_obj_stock(stock); 3204 obj_cgroup_get(objcg); 3205 stock->cached_objcg = objcg; 3206 stock->nr_bytes = atomic_xchg(&objcg->nr_charged_bytes, 0); 3207 } 3208 stock->nr_bytes += nr_bytes; 3209 3210 if (stock->nr_bytes > PAGE_SIZE) 3211 drain_obj_stock(stock); 3212 3213 local_irq_restore(flags); 3214 } 3215 3216 int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size) 3217 { 3218 struct mem_cgroup *memcg; 3219 unsigned int nr_pages, nr_bytes; 3220 int ret; 3221 3222 if (consume_obj_stock(objcg, size)) 3223 return 0; 3224 3225 /* 3226 * In theory, memcg->nr_charged_bytes can have enough 3227 * pre-charged bytes to satisfy the allocation. However, 3228 * flushing memcg->nr_charged_bytes requires two atomic 3229 * operations, and memcg->nr_charged_bytes can't be big, 3230 * so it's better to ignore it and try grab some new pages. 3231 * memcg->nr_charged_bytes will be flushed in 3232 * refill_obj_stock(), called from this function or 3233 * independently later. 3234 */ 3235 rcu_read_lock(); 3236 memcg = obj_cgroup_memcg(objcg); 3237 css_get(&memcg->css); 3238 rcu_read_unlock(); 3239 3240 nr_pages = size >> PAGE_SHIFT; 3241 nr_bytes = size & (PAGE_SIZE - 1); 3242 3243 if (nr_bytes) 3244 nr_pages += 1; 3245 3246 ret = __memcg_kmem_charge(memcg, gfp, nr_pages); 3247 if (!ret && nr_bytes) 3248 refill_obj_stock(objcg, PAGE_SIZE - nr_bytes); 3249 3250 css_put(&memcg->css); 3251 return ret; 3252 } 3253 3254 void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size) 3255 { 3256 refill_obj_stock(objcg, size); 3257 } 3258 3259 #endif /* CONFIG_MEMCG_KMEM */ 3260 3261 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 3262 3263 /* 3264 * Because tail pages are not marked as "used", set it. We're under 3265 * pgdat->lru_lock and migration entries setup in all page mappings. 3266 */ 3267 void mem_cgroup_split_huge_fixup(struct page *head) 3268 { 3269 struct mem_cgroup *memcg = page_memcg(head); 3270 int i; 3271 3272 if (mem_cgroup_disabled()) 3273 return; 3274 3275 for (i = 1; i < HPAGE_PMD_NR; i++) { 3276 css_get(&memcg->css); 3277 head[i].memcg_data = (unsigned long)memcg; 3278 } 3279 } 3280 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 3281 3282 #ifdef CONFIG_MEMCG_SWAP 3283 /** 3284 * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record. 3285 * @entry: swap entry to be moved 3286 * @from: mem_cgroup which the entry is moved from 3287 * @to: mem_cgroup which the entry is moved to 3288 * 3289 * It succeeds only when the swap_cgroup's record for this entry is the same 3290 * as the mem_cgroup's id of @from. 3291 * 3292 * Returns 0 on success, -EINVAL on failure. 
3293 * 3294 * The caller must have charged to @to, IOW, called page_counter_charge() about 3295 * both res and memsw, and called css_get(). 3296 */ 3297 static int mem_cgroup_move_swap_account(swp_entry_t entry, 3298 struct mem_cgroup *from, struct mem_cgroup *to) 3299 { 3300 unsigned short old_id, new_id; 3301 3302 old_id = mem_cgroup_id(from); 3303 new_id = mem_cgroup_id(to); 3304 3305 if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) { 3306 mod_memcg_state(from, MEMCG_SWAP, -1); 3307 mod_memcg_state(to, MEMCG_SWAP, 1); 3308 return 0; 3309 } 3310 return -EINVAL; 3311 } 3312 #else 3313 static inline int mem_cgroup_move_swap_account(swp_entry_t entry, 3314 struct mem_cgroup *from, struct mem_cgroup *to) 3315 { 3316 return -EINVAL; 3317 } 3318 #endif 3319 3320 static DEFINE_MUTEX(memcg_max_mutex); 3321 3322 static int mem_cgroup_resize_max(struct mem_cgroup *memcg, 3323 unsigned long max, bool memsw) 3324 { 3325 bool enlarge = false; 3326 bool drained = false; 3327 int ret; 3328 bool limits_invariant; 3329 struct page_counter *counter = memsw ? &memcg->memsw : &memcg->memory; 3330 3331 do { 3332 if (signal_pending(current)) { 3333 ret = -EINTR; 3334 break; 3335 } 3336 3337 mutex_lock(&memcg_max_mutex); 3338 /* 3339 * Make sure that the new limit (memsw or memory limit) doesn't 3340 * break our basic invariant rule memory.max <= memsw.max. 3341 */ 3342 limits_invariant = memsw ? max >= READ_ONCE(memcg->memory.max) : 3343 max <= memcg->memsw.max; 3344 if (!limits_invariant) { 3345 mutex_unlock(&memcg_max_mutex); 3346 ret = -EINVAL; 3347 break; 3348 } 3349 if (max > counter->max) 3350 enlarge = true; 3351 ret = page_counter_set_max(counter, max); 3352 mutex_unlock(&memcg_max_mutex); 3353 3354 if (!ret) 3355 break; 3356 3357 if (!drained) { 3358 drain_all_stock(memcg); 3359 drained = true; 3360 continue; 3361 } 3362 3363 if (!try_to_free_mem_cgroup_pages(memcg, 1, 3364 GFP_KERNEL, !memsw)) { 3365 ret = -EBUSY; 3366 break; 3367 } 3368 } while (true); 3369 3370 if (!ret && enlarge) 3371 memcg_oom_recover(memcg); 3372 3373 return ret; 3374 } 3375 3376 unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order, 3377 gfp_t gfp_mask, 3378 unsigned long *total_scanned) 3379 { 3380 unsigned long nr_reclaimed = 0; 3381 struct mem_cgroup_per_node *mz, *next_mz = NULL; 3382 unsigned long reclaimed; 3383 int loop = 0; 3384 struct mem_cgroup_tree_per_node *mctz; 3385 unsigned long excess; 3386 unsigned long nr_scanned; 3387 3388 if (order > 0) 3389 return 0; 3390 3391 mctz = soft_limit_tree_node(pgdat->node_id); 3392 3393 /* 3394 * Do not even bother to check the largest node if the root 3395 * is empty. Do it lockless to prevent lock bouncing. Races 3396 * are acceptable as soft limit is best effort anyway. 
3397 */ 3398 if (!mctz || RB_EMPTY_ROOT(&mctz->rb_root)) 3399 return 0; 3400 3401 /* 3402 * This loop can run for a while, especially if mem_cgroups continuously 3403 * keep exceeding their soft limit and putting the system under 3404 * pressure 3405 */ 3406 do { 3407 if (next_mz) 3408 mz = next_mz; 3409 else 3410 mz = mem_cgroup_largest_soft_limit_node(mctz); 3411 if (!mz) 3412 break; 3413 3414 nr_scanned = 0; 3415 reclaimed = mem_cgroup_soft_reclaim(mz->memcg, pgdat, 3416 gfp_mask, &nr_scanned); 3417 nr_reclaimed += reclaimed; 3418 *total_scanned += nr_scanned; 3419 spin_lock_irq(&mctz->lock); 3420 __mem_cgroup_remove_exceeded(mz, mctz); 3421 3422 /* 3423 * If we failed to reclaim anything from this memory cgroup 3424 * it is time to move on to the next cgroup 3425 */ 3426 next_mz = NULL; 3427 if (!reclaimed) 3428 next_mz = __mem_cgroup_largest_soft_limit_node(mctz); 3429 3430 excess = soft_limit_excess(mz->memcg); 3431 /* 3432 * One school of thought says that we should not add 3433 * back the node to the tree if reclaim returns 0. 3434 * But our reclaim could return 0 simply because, due 3435 * to priority, we are exposing a smaller subset of 3436 * memory to reclaim from. Consider this as a longer 3437 * term TODO. 3438 */ 3439 /* If excess == 0, no tree ops */ 3440 __mem_cgroup_insert_exceeded(mz, mctz, excess); 3441 spin_unlock_irq(&mctz->lock); 3442 css_put(&mz->memcg->css); 3443 loop++; 3444 /* 3445 * We could not reclaim anything and there are no more 3446 * mem cgroups to try, or we seem to be looping without 3447 * reclaiming anything. 3448 */ 3449 if (!nr_reclaimed && 3450 (next_mz == NULL || 3451 loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS)) 3452 break; 3453 } while (!nr_reclaimed); 3454 if (next_mz) 3455 css_put(&next_mz->memcg->css); 3456 return nr_reclaimed; 3457 } 3458 3459 /* 3460 * Test whether @memcg has children, dead or alive. Note that this 3461 * function doesn't care whether @memcg has use_hierarchy enabled and 3462 * returns %true if there are child csses according to the cgroup 3463 * hierarchy. Testing use_hierarchy is the caller's responsibility. 3464 */ 3465 static inline bool memcg_has_children(struct mem_cgroup *memcg) 3466 { 3467 bool ret; 3468 3469 rcu_read_lock(); 3470 ret = css_next_child(NULL, &memcg->css); 3471 rcu_read_unlock(); 3472 return ret; 3473 } 3474 3475 /* 3476 * Reclaims as many pages from the given memcg as possible. 3477 * 3478 * Caller is responsible for holding a css reference for memcg.
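 * (This backs the cgroup1 memory.force_empty interface; see
 * mem_cgroup_force_empty_write() below.)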
3479 */ 3480 static int mem_cgroup_force_empty(struct mem_cgroup *memcg) 3481 { 3482 int nr_retries = MAX_RECLAIM_RETRIES; 3483 3484 /* we call try-to-free pages to make this cgroup empty */ 3485 lru_add_drain_all(); 3486 3487 drain_all_stock(memcg); 3488 3489 /* try to free all pages in this cgroup */ 3490 while (nr_retries && page_counter_read(&memcg->memory)) { 3491 int progress; 3492 3493 if (signal_pending(current)) 3494 return -EINTR; 3495 3496 progress = try_to_free_mem_cgroup_pages(memcg, 1, 3497 GFP_KERNEL, true); 3498 if (!progress) { 3499 nr_retries--; 3500 /* maybe some writeback is necessary */ 3501 congestion_wait(BLK_RW_ASYNC, HZ/10); 3502 } 3503 3504 } 3505 3506 return 0; 3507 } 3508 3509 static ssize_t mem_cgroup_force_empty_write(struct kernfs_open_file *of, 3510 char *buf, size_t nbytes, 3511 loff_t off) 3512 { 3513 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 3514 3515 if (mem_cgroup_is_root(memcg)) 3516 return -EINVAL; 3517 return mem_cgroup_force_empty(memcg) ?: nbytes; 3518 } 3519 3520 static u64 mem_cgroup_hierarchy_read(struct cgroup_subsys_state *css, 3521 struct cftype *cft) 3522 { 3523 return mem_cgroup_from_css(css)->use_hierarchy; 3524 } 3525 3526 static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css, 3527 struct cftype *cft, u64 val) 3528 { 3529 int retval = 0; 3530 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 3531 struct mem_cgroup *parent_memcg = mem_cgroup_from_css(memcg->css.parent); 3532 3533 if (memcg->use_hierarchy == val) 3534 return 0; 3535 3536 /* 3537 * If parent's use_hierarchy is set, we can't make any modifications 3538 * in the child subtrees. If it is unset, then the change can 3539 * occur, provided the current cgroup has no children. 3540 * 3541 * For the root cgroup, parent_memcg is NULL; we allow the value to be 3542 * set if there are no children.
3543 */ 3544 if ((!parent_memcg || !parent_memcg->use_hierarchy) && 3545 (val == 1 || val == 0)) { 3546 if (!memcg_has_children(memcg)) 3547 memcg->use_hierarchy = val; 3548 else 3549 retval = -EBUSY; 3550 } else 3551 retval = -EINVAL; 3552 3553 return retval; 3554 } 3555 3556 static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap) 3557 { 3558 unsigned long val; 3559 3560 if (mem_cgroup_is_root(memcg)) { 3561 val = memcg_page_state(memcg, NR_FILE_PAGES) + 3562 memcg_page_state(memcg, NR_ANON_MAPPED); 3563 if (swap) 3564 val += memcg_page_state(memcg, MEMCG_SWAP); 3565 } else { 3566 if (!swap) 3567 val = page_counter_read(&memcg->memory); 3568 else 3569 val = page_counter_read(&memcg->memsw); 3570 } 3571 return val; 3572 } 3573 3574 enum { 3575 RES_USAGE, 3576 RES_LIMIT, 3577 RES_MAX_USAGE, 3578 RES_FAILCNT, 3579 RES_SOFT_LIMIT, 3580 }; 3581 3582 static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css, 3583 struct cftype *cft) 3584 { 3585 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 3586 struct page_counter *counter; 3587 3588 switch (MEMFILE_TYPE(cft->private)) { 3589 case _MEM: 3590 counter = &memcg->memory; 3591 break; 3592 case _MEMSWAP: 3593 counter = &memcg->memsw; 3594 break; 3595 case _KMEM: 3596 counter = &memcg->kmem; 3597 break; 3598 case _TCP: 3599 counter = &memcg->tcpmem; 3600 break; 3601 default: 3602 BUG(); 3603 } 3604 3605 switch (MEMFILE_ATTR(cft->private)) { 3606 case RES_USAGE: 3607 if (counter == &memcg->memory) 3608 return (u64)mem_cgroup_usage(memcg, false) * PAGE_SIZE; 3609 if (counter == &memcg->memsw) 3610 return (u64)mem_cgroup_usage(memcg, true) * PAGE_SIZE; 3611 return (u64)page_counter_read(counter) * PAGE_SIZE; 3612 case RES_LIMIT: 3613 return (u64)counter->max * PAGE_SIZE; 3614 case RES_MAX_USAGE: 3615 return (u64)counter->watermark * PAGE_SIZE; 3616 case RES_FAILCNT: 3617 return counter->failcnt; 3618 case RES_SOFT_LIMIT: 3619 return (u64)memcg->soft_limit * PAGE_SIZE; 3620 default: 3621 BUG(); 3622 } 3623 } 3624 3625 static void memcg_flush_percpu_vmstats(struct mem_cgroup *memcg) 3626 { 3627 unsigned long stat[MEMCG_NR_STAT] = {0}; 3628 struct mem_cgroup *mi; 3629 int node, cpu, i; 3630 3631 for_each_online_cpu(cpu) 3632 for (i = 0; i < MEMCG_NR_STAT; i++) 3633 stat[i] += per_cpu(memcg->vmstats_percpu->stat[i], cpu); 3634 3635 for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) 3636 for (i = 0; i < MEMCG_NR_STAT; i++) 3637 atomic_long_add(stat[i], &mi->vmstats[i]); 3638 3639 for_each_node(node) { 3640 struct mem_cgroup_per_node *pn = memcg->nodeinfo[node]; 3641 struct mem_cgroup_per_node *pi; 3642 3643 for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) 3644 stat[i] = 0; 3645 3646 for_each_online_cpu(cpu) 3647 for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) 3648 stat[i] += per_cpu( 3649 pn->lruvec_stat_cpu->count[i], cpu); 3650 3651 for (pi = pn; pi; pi = parent_nodeinfo(pi, node)) 3652 for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) 3653 atomic_long_add(stat[i], &pi->lruvec_stat[i]); 3654 } 3655 } 3656 3657 static void memcg_flush_percpu_vmevents(struct mem_cgroup *memcg) 3658 { 3659 unsigned long events[NR_VM_EVENT_ITEMS]; 3660 struct mem_cgroup *mi; 3661 int cpu, i; 3662 3663 for (i = 0; i < NR_VM_EVENT_ITEMS; i++) 3664 events[i] = 0; 3665 3666 for_each_online_cpu(cpu) 3667 for (i = 0; i < NR_VM_EVENT_ITEMS; i++) 3668 events[i] += per_cpu(memcg->vmstats_percpu->events[i], 3669 cpu); 3670 3671 for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) 3672 for (i = 0; i < NR_VM_EVENT_ITEMS; i++) 3673 atomic_long_add(events[i], &mi->vmevents[i]); 
3674 } 3675 3676 #ifdef CONFIG_MEMCG_KMEM 3677 static int memcg_online_kmem(struct mem_cgroup *memcg) 3678 { 3679 struct obj_cgroup *objcg; 3680 int memcg_id; 3681 3682 if (cgroup_memory_nokmem) 3683 return 0; 3684 3685 BUG_ON(memcg->kmemcg_id >= 0); 3686 BUG_ON(memcg->kmem_state); 3687 3688 memcg_id = memcg_alloc_cache_id(); 3689 if (memcg_id < 0) 3690 return memcg_id; 3691 3692 objcg = obj_cgroup_alloc(); 3693 if (!objcg) { 3694 memcg_free_cache_id(memcg_id); 3695 return -ENOMEM; 3696 } 3697 objcg->memcg = memcg; 3698 rcu_assign_pointer(memcg->objcg, objcg); 3699 3700 static_branch_enable(&memcg_kmem_enabled_key); 3701 3702 /* 3703 * A memory cgroup is considered kmem-online as soon as it gets 3704 * kmemcg_id. Setting the id after enabling static branching will 3705 * guarantee no one starts accounting before all call sites are 3706 * patched. 3707 */ 3708 memcg->kmemcg_id = memcg_id; 3709 memcg->kmem_state = KMEM_ONLINE; 3710 3711 return 0; 3712 } 3713 3714 static void memcg_offline_kmem(struct mem_cgroup *memcg) 3715 { 3716 struct cgroup_subsys_state *css; 3717 struct mem_cgroup *parent, *child; 3718 int kmemcg_id; 3719 3720 if (memcg->kmem_state != KMEM_ONLINE) 3721 return; 3722 3723 memcg->kmem_state = KMEM_ALLOCATED; 3724 3725 parent = parent_mem_cgroup(memcg); 3726 if (!parent) 3727 parent = root_mem_cgroup; 3728 3729 memcg_reparent_objcgs(memcg, parent); 3730 3731 kmemcg_id = memcg->kmemcg_id; 3732 BUG_ON(kmemcg_id < 0); 3733 3734 /* 3735 * Change kmemcg_id of this cgroup and all its descendants to the 3736 * parent's id, and then move all entries from this cgroup's list_lrus 3737 * to ones of the parent. After we have finished, all list_lrus 3738 * corresponding to this cgroup are guaranteed to remain empty. The 3739 * ordering is imposed by list_lru_node->lock taken by 3740 * memcg_drain_all_list_lrus(). 3741 */ 3742 rcu_read_lock(); /* can be called from css_free w/o cgroup_mutex */ 3743 css_for_each_descendant_pre(css, &memcg->css) { 3744 child = mem_cgroup_from_css(css); 3745 BUG_ON(child->kmemcg_id != kmemcg_id); 3746 child->kmemcg_id = parent->kmemcg_id; 3747 if (!memcg->use_hierarchy) 3748 break; 3749 } 3750 rcu_read_unlock(); 3751 3752 memcg_drain_all_list_lrus(kmemcg_id, parent); 3753 3754 memcg_free_cache_id(kmemcg_id); 3755 } 3756 3757 static void memcg_free_kmem(struct mem_cgroup *memcg) 3758 { 3759 /* css_alloc() failed, offlining didn't happen */ 3760 if (unlikely(memcg->kmem_state == KMEM_ONLINE)) 3761 memcg_offline_kmem(memcg); 3762 } 3763 #else 3764 static int memcg_online_kmem(struct mem_cgroup *memcg) 3765 { 3766 return 0; 3767 } 3768 static void memcg_offline_kmem(struct mem_cgroup *memcg) 3769 { 3770 } 3771 static void memcg_free_kmem(struct mem_cgroup *memcg) 3772 { 3773 } 3774 #endif /* CONFIG_MEMCG_KMEM */ 3775 3776 static int memcg_update_kmem_max(struct mem_cgroup *memcg, 3777 unsigned long max) 3778 { 3779 int ret; 3780 3781 mutex_lock(&memcg_max_mutex); 3782 ret = page_counter_set_max(&memcg->kmem, max); 3783 mutex_unlock(&memcg_max_mutex); 3784 return ret; 3785 } 3786 3787 static int memcg_update_tcp_max(struct mem_cgroup *memcg, unsigned long max) 3788 { 3789 int ret; 3790 3791 mutex_lock(&memcg_max_mutex); 3792 3793 ret = page_counter_set_max(&memcg->tcpmem, max); 3794 if (ret) 3795 goto out; 3796 3797 if (!memcg->tcpmem_active) { 3798 /* 3799 * The active flag needs to be written after the static_key 3800 * update. This is what guarantees that the socket activation 3801 * function is the last one to run. 
See mem_cgroup_sk_alloc() 3802 * for details, and note that we don't mark any socket as 3803 * belonging to this memcg until that flag is up. 3804 * 3805 * We need to do this, because static_keys will span multiple 3806 * sites, but we can't control their order. If we mark a socket 3807 * as accounted, but the accounting functions are not patched in 3808 * yet, we'll lose accounting. 3809 * 3810 * We never race with the readers in mem_cgroup_sk_alloc(), 3811 * because when this value change, the code to process it is not 3812 * patched in yet. 3813 */ 3814 static_branch_inc(&memcg_sockets_enabled_key); 3815 memcg->tcpmem_active = true; 3816 } 3817 out: 3818 mutex_unlock(&memcg_max_mutex); 3819 return ret; 3820 } 3821 3822 /* 3823 * The user of this function is... 3824 * RES_LIMIT. 3825 */ 3826 static ssize_t mem_cgroup_write(struct kernfs_open_file *of, 3827 char *buf, size_t nbytes, loff_t off) 3828 { 3829 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 3830 unsigned long nr_pages; 3831 int ret; 3832 3833 buf = strstrip(buf); 3834 ret = page_counter_memparse(buf, "-1", &nr_pages); 3835 if (ret) 3836 return ret; 3837 3838 switch (MEMFILE_ATTR(of_cft(of)->private)) { 3839 case RES_LIMIT: 3840 if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */ 3841 ret = -EINVAL; 3842 break; 3843 } 3844 switch (MEMFILE_TYPE(of_cft(of)->private)) { 3845 case _MEM: 3846 ret = mem_cgroup_resize_max(memcg, nr_pages, false); 3847 break; 3848 case _MEMSWAP: 3849 ret = mem_cgroup_resize_max(memcg, nr_pages, true); 3850 break; 3851 case _KMEM: 3852 pr_warn_once("kmem.limit_in_bytes is deprecated and will be removed. " 3853 "Please report your usecase to linux-mm@kvack.org if you " 3854 "depend on this functionality.\n"); 3855 ret = memcg_update_kmem_max(memcg, nr_pages); 3856 break; 3857 case _TCP: 3858 ret = memcg_update_tcp_max(memcg, nr_pages); 3859 break; 3860 } 3861 break; 3862 case RES_SOFT_LIMIT: 3863 memcg->soft_limit = nr_pages; 3864 ret = 0; 3865 break; 3866 } 3867 return ret ?: nbytes; 3868 } 3869 3870 static ssize_t mem_cgroup_reset(struct kernfs_open_file *of, char *buf, 3871 size_t nbytes, loff_t off) 3872 { 3873 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 3874 struct page_counter *counter; 3875 3876 switch (MEMFILE_TYPE(of_cft(of)->private)) { 3877 case _MEM: 3878 counter = &memcg->memory; 3879 break; 3880 case _MEMSWAP: 3881 counter = &memcg->memsw; 3882 break; 3883 case _KMEM: 3884 counter = &memcg->kmem; 3885 break; 3886 case _TCP: 3887 counter = &memcg->tcpmem; 3888 break; 3889 default: 3890 BUG(); 3891 } 3892 3893 switch (MEMFILE_ATTR(of_cft(of)->private)) { 3894 case RES_MAX_USAGE: 3895 page_counter_reset_watermark(counter); 3896 break; 3897 case RES_FAILCNT: 3898 counter->failcnt = 0; 3899 break; 3900 default: 3901 BUG(); 3902 } 3903 3904 return nbytes; 3905 } 3906 3907 static u64 mem_cgroup_move_charge_read(struct cgroup_subsys_state *css, 3908 struct cftype *cft) 3909 { 3910 return mem_cgroup_from_css(css)->move_charge_at_immigrate; 3911 } 3912 3913 #ifdef CONFIG_MMU 3914 static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css, 3915 struct cftype *cft, u64 val) 3916 { 3917 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 3918 3919 if (val & ~MOVE_MASK) 3920 return -EINVAL; 3921 3922 /* 3923 * No kind of locking is needed in here, because ->can_attach() will 3924 * check this value once in the beginning of the process, and then carry 3925 * on with stale data. 
This means that changes to this value will only 3926 * affect task migrations starting after the change. 3927 */ 3928 memcg->move_charge_at_immigrate = val; 3929 return 0; 3930 } 3931 #else 3932 static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css, 3933 struct cftype *cft, u64 val) 3934 { 3935 return -ENOSYS; 3936 } 3937 #endif 3938 3939 #ifdef CONFIG_NUMA 3940 3941 #define LRU_ALL_FILE (BIT(LRU_INACTIVE_FILE) | BIT(LRU_ACTIVE_FILE)) 3942 #define LRU_ALL_ANON (BIT(LRU_INACTIVE_ANON) | BIT(LRU_ACTIVE_ANON)) 3943 #define LRU_ALL ((1 << NR_LRU_LISTS) - 1) 3944 3945 static unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg, 3946 int nid, unsigned int lru_mask, bool tree) 3947 { 3948 struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid)); 3949 unsigned long nr = 0; 3950 enum lru_list lru; 3951 3952 VM_BUG_ON((unsigned)nid >= nr_node_ids); 3953 3954 for_each_lru(lru) { 3955 if (!(BIT(lru) & lru_mask)) 3956 continue; 3957 if (tree) 3958 nr += lruvec_page_state(lruvec, NR_LRU_BASE + lru); 3959 else 3960 nr += lruvec_page_state_local(lruvec, NR_LRU_BASE + lru); 3961 } 3962 return nr; 3963 } 3964 3965 static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg, 3966 unsigned int lru_mask, 3967 bool tree) 3968 { 3969 unsigned long nr = 0; 3970 enum lru_list lru; 3971 3972 for_each_lru(lru) { 3973 if (!(BIT(lru) & lru_mask)) 3974 continue; 3975 if (tree) 3976 nr += memcg_page_state(memcg, NR_LRU_BASE + lru); 3977 else 3978 nr += memcg_page_state_local(memcg, NR_LRU_BASE + lru); 3979 } 3980 return nr; 3981 } 3982 3983 static int memcg_numa_stat_show(struct seq_file *m, void *v) 3984 { 3985 struct numa_stat { 3986 const char *name; 3987 unsigned int lru_mask; 3988 }; 3989 3990 static const struct numa_stat stats[] = { 3991 { "total", LRU_ALL }, 3992 { "file", LRU_ALL_FILE }, 3993 { "anon", LRU_ALL_ANON }, 3994 { "unevictable", BIT(LRU_UNEVICTABLE) }, 3995 }; 3996 const struct numa_stat *stat; 3997 int nid; 3998 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 3999 4000 for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) { 4001 seq_printf(m, "%s=%lu", stat->name, 4002 mem_cgroup_nr_lru_pages(memcg, stat->lru_mask, 4003 false)); 4004 for_each_node_state(nid, N_MEMORY) 4005 seq_printf(m, " N%d=%lu", nid, 4006 mem_cgroup_node_nr_lru_pages(memcg, nid, 4007 stat->lru_mask, false)); 4008 seq_putc(m, '\n'); 4009 } 4010 4011 for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) { 4012 4013 seq_printf(m, "hierarchical_%s=%lu", stat->name, 4014 mem_cgroup_nr_lru_pages(memcg, stat->lru_mask, 4015 true)); 4016 for_each_node_state(nid, N_MEMORY) 4017 seq_printf(m, " N%d=%lu", nid, 4018 mem_cgroup_node_nr_lru_pages(memcg, nid, 4019 stat->lru_mask, true)); 4020 seq_putc(m, '\n'); 4021 } 4022 4023 return 0; 4024 } 4025 #endif /* CONFIG_NUMA */ 4026 4027 static const unsigned int memcg1_stats[] = { 4028 NR_FILE_PAGES, 4029 NR_ANON_MAPPED, 4030 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 4031 NR_ANON_THPS, 4032 #endif 4033 NR_SHMEM, 4034 NR_FILE_MAPPED, 4035 NR_FILE_DIRTY, 4036 NR_WRITEBACK, 4037 MEMCG_SWAP, 4038 }; 4039 4040 static const char *const memcg1_stat_names[] = { 4041 "cache", 4042 "rss", 4043 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 4044 "rss_huge", 4045 #endif 4046 "shmem", 4047 "mapped_file", 4048 "dirty", 4049 "writeback", 4050 "swap", 4051 }; 4052 4053 /* Universal VM events cgroup1 shows, original sort order */ 4054 static const unsigned int memcg1_events[] = { 4055 PGPGIN, 4056 PGPGOUT, 4057 PGFAULT, 4058 PGMAJFAULT, 4059 }; 4060 4061 static int 
memcg_stat_show(struct seq_file *m, void *v) 4062 { 4063 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 4064 unsigned long memory, memsw; 4065 struct mem_cgroup *mi; 4066 unsigned int i; 4067 4068 BUILD_BUG_ON(ARRAY_SIZE(memcg1_stat_names) != ARRAY_SIZE(memcg1_stats)); 4069 4070 for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) { 4071 unsigned long nr; 4072 4073 if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account()) 4074 continue; 4075 nr = memcg_page_state_local(memcg, memcg1_stats[i]); 4076 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 4077 if (memcg1_stats[i] == NR_ANON_THPS) 4078 nr *= HPAGE_PMD_NR; 4079 #endif 4080 seq_printf(m, "%s %lu\n", memcg1_stat_names[i], nr * PAGE_SIZE); 4081 } 4082 4083 for (i = 0; i < ARRAY_SIZE(memcg1_events); i++) 4084 seq_printf(m, "%s %lu\n", vm_event_name(memcg1_events[i]), 4085 memcg_events_local(memcg, memcg1_events[i])); 4086 4087 for (i = 0; i < NR_LRU_LISTS; i++) 4088 seq_printf(m, "%s %lu\n", lru_list_name(i), 4089 memcg_page_state_local(memcg, NR_LRU_BASE + i) * 4090 PAGE_SIZE); 4091 4092 /* Hierarchical information */ 4093 memory = memsw = PAGE_COUNTER_MAX; 4094 for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) { 4095 memory = min(memory, READ_ONCE(mi->memory.max)); 4096 memsw = min(memsw, READ_ONCE(mi->memsw.max)); 4097 } 4098 seq_printf(m, "hierarchical_memory_limit %llu\n", 4099 (u64)memory * PAGE_SIZE); 4100 if (do_memsw_account()) 4101 seq_printf(m, "hierarchical_memsw_limit %llu\n", 4102 (u64)memsw * PAGE_SIZE); 4103 4104 for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) { 4105 unsigned long nr; 4106 4107 if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account()) 4108 continue; 4109 nr = memcg_page_state(memcg, memcg1_stats[i]); 4110 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 4111 if (memcg1_stats[i] == NR_ANON_THPS) 4112 nr *= HPAGE_PMD_NR; 4113 #endif 4114 seq_printf(m, "total_%s %llu\n", memcg1_stat_names[i], 4115 (u64)nr * PAGE_SIZE); 4116 } 4117 4118 for (i = 0; i < ARRAY_SIZE(memcg1_events); i++) 4119 seq_printf(m, "total_%s %llu\n", 4120 vm_event_name(memcg1_events[i]), 4121 (u64)memcg_events(memcg, memcg1_events[i])); 4122 4123 for (i = 0; i < NR_LRU_LISTS; i++) 4124 seq_printf(m, "total_%s %llu\n", lru_list_name(i), 4125 (u64)memcg_page_state(memcg, NR_LRU_BASE + i) * 4126 PAGE_SIZE); 4127 4128 #ifdef CONFIG_DEBUG_VM 4129 { 4130 pg_data_t *pgdat; 4131 struct mem_cgroup_per_node *mz; 4132 unsigned long anon_cost = 0; 4133 unsigned long file_cost = 0; 4134 4135 for_each_online_pgdat(pgdat) { 4136 mz = mem_cgroup_nodeinfo(memcg, pgdat->node_id); 4137 4138 anon_cost += mz->lruvec.anon_cost; 4139 file_cost += mz->lruvec.file_cost; 4140 } 4141 seq_printf(m, "anon_cost %lu\n", anon_cost); 4142 seq_printf(m, "file_cost %lu\n", file_cost); 4143 } 4144 #endif 4145 4146 return 0; 4147 } 4148 4149 static u64 mem_cgroup_swappiness_read(struct cgroup_subsys_state *css, 4150 struct cftype *cft) 4151 { 4152 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 4153 4154 return mem_cgroup_swappiness(memcg); 4155 } 4156 4157 static int mem_cgroup_swappiness_write(struct cgroup_subsys_state *css, 4158 struct cftype *cft, u64 val) 4159 { 4160 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 4161 4162 if (val > 100) 4163 return -EINVAL; 4164 4165 if (css->parent) 4166 memcg->swappiness = val; 4167 else 4168 vm_swappiness = val; 4169 4170 return 0; 4171 } 4172 4173 static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap) 4174 { 4175 struct mem_cgroup_threshold_ary *t; 4176 unsigned long usage; 4177 int i; 4178 4179 rcu_read_lock(); 4180 if (!swap) 4181 t 
= rcu_dereference(memcg->thresholds.primary); 4182 else 4183 t = rcu_dereference(memcg->memsw_thresholds.primary); 4184 4185 if (!t) 4186 goto unlock; 4187 4188 usage = mem_cgroup_usage(memcg, swap); 4189 4190 /* 4191 * current_threshold points to threshold just below or equal to usage. 4192 * If it's not true, a threshold was crossed after last 4193 * call of __mem_cgroup_threshold(). 4194 */ 4195 i = t->current_threshold; 4196 4197 /* 4198 * Iterate backward over array of thresholds starting from 4199 * current_threshold and check if a threshold is crossed. 4200 * If none of thresholds below usage is crossed, we read 4201 * only one element of the array here. 4202 */ 4203 for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--) 4204 eventfd_signal(t->entries[i].eventfd, 1); 4205 4206 /* i = current_threshold + 1 */ 4207 i++; 4208 4209 /* 4210 * Iterate forward over array of thresholds starting from 4211 * current_threshold+1 and check if a threshold is crossed. 4212 * If none of thresholds above usage is crossed, we read 4213 * only one element of the array here. 4214 */ 4215 for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++) 4216 eventfd_signal(t->entries[i].eventfd, 1); 4217 4218 /* Update current_threshold */ 4219 t->current_threshold = i - 1; 4220 unlock: 4221 rcu_read_unlock(); 4222 } 4223 4224 static void mem_cgroup_threshold(struct mem_cgroup *memcg) 4225 { 4226 while (memcg) { 4227 __mem_cgroup_threshold(memcg, false); 4228 if (do_memsw_account()) 4229 __mem_cgroup_threshold(memcg, true); 4230 4231 memcg = parent_mem_cgroup(memcg); 4232 } 4233 } 4234 4235 static int compare_thresholds(const void *a, const void *b) 4236 { 4237 const struct mem_cgroup_threshold *_a = a; 4238 const struct mem_cgroup_threshold *_b = b; 4239 4240 if (_a->threshold > _b->threshold) 4241 return 1; 4242 4243 if (_a->threshold < _b->threshold) 4244 return -1; 4245 4246 return 0; 4247 } 4248 4249 static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg) 4250 { 4251 struct mem_cgroup_eventfd_list *ev; 4252 4253 spin_lock(&memcg_oom_lock); 4254 4255 list_for_each_entry(ev, &memcg->oom_notify, list) 4256 eventfd_signal(ev->eventfd, 1); 4257 4258 spin_unlock(&memcg_oom_lock); 4259 return 0; 4260 } 4261 4262 static void mem_cgroup_oom_notify(struct mem_cgroup *memcg) 4263 { 4264 struct mem_cgroup *iter; 4265 4266 for_each_mem_cgroup_tree(iter, memcg) 4267 mem_cgroup_oom_notify_cb(iter); 4268 } 4269 4270 static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg, 4271 struct eventfd_ctx *eventfd, const char *args, enum res_type type) 4272 { 4273 struct mem_cgroup_thresholds *thresholds; 4274 struct mem_cgroup_threshold_ary *new; 4275 unsigned long threshold; 4276 unsigned long usage; 4277 int i, size, ret; 4278 4279 ret = page_counter_memparse(args, "-1", &threshold); 4280 if (ret) 4281 return ret; 4282 4283 mutex_lock(&memcg->thresholds_lock); 4284 4285 if (type == _MEM) { 4286 thresholds = &memcg->thresholds; 4287 usage = mem_cgroup_usage(memcg, false); 4288 } else if (type == _MEMSWAP) { 4289 thresholds = &memcg->memsw_thresholds; 4290 usage = mem_cgroup_usage(memcg, true); 4291 } else 4292 BUG(); 4293 4294 /* Check if a threshold crossed before adding a new one */ 4295 if (thresholds->primary) 4296 __mem_cgroup_threshold(memcg, type == _MEMSWAP); 4297 4298 size = thresholds->primary ? 
thresholds->primary->size + 1 : 1; 4299 4300 /* Allocate memory for new array of thresholds */ 4301 new = kmalloc(struct_size(new, entries, size), GFP_KERNEL); 4302 if (!new) { 4303 ret = -ENOMEM; 4304 goto unlock; 4305 } 4306 new->size = size; 4307 4308 /* Copy thresholds (if any) to new array */ 4309 if (thresholds->primary) 4310 memcpy(new->entries, thresholds->primary->entries, 4311 flex_array_size(new, entries, size - 1)); 4312 4313 /* Add new threshold */ 4314 new->entries[size - 1].eventfd = eventfd; 4315 new->entries[size - 1].threshold = threshold; 4316 4317 /* Sort thresholds. Registering of new threshold isn't time-critical */ 4318 sort(new->entries, size, sizeof(*new->entries), 4319 compare_thresholds, NULL); 4320 4321 /* Find current threshold */ 4322 new->current_threshold = -1; 4323 for (i = 0; i < size; i++) { 4324 if (new->entries[i].threshold <= usage) { 4325 /* 4326 * new->current_threshold will not be used until 4327 * rcu_assign_pointer(), so it's safe to increment 4328 * it here. 4329 */ 4330 ++new->current_threshold; 4331 } else 4332 break; 4333 } 4334 4335 /* Free old spare buffer and save old primary buffer as spare */ 4336 kfree(thresholds->spare); 4337 thresholds->spare = thresholds->primary; 4338 4339 rcu_assign_pointer(thresholds->primary, new); 4340 4341 /* To be sure that nobody uses thresholds */ 4342 synchronize_rcu(); 4343 4344 unlock: 4345 mutex_unlock(&memcg->thresholds_lock); 4346 4347 return ret; 4348 } 4349 4350 static int mem_cgroup_usage_register_event(struct mem_cgroup *memcg, 4351 struct eventfd_ctx *eventfd, const char *args) 4352 { 4353 return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEM); 4354 } 4355 4356 static int memsw_cgroup_usage_register_event(struct mem_cgroup *memcg, 4357 struct eventfd_ctx *eventfd, const char *args) 4358 { 4359 return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEMSWAP); 4360 } 4361 4362 static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg, 4363 struct eventfd_ctx *eventfd, enum res_type type) 4364 { 4365 struct mem_cgroup_thresholds *thresholds; 4366 struct mem_cgroup_threshold_ary *new; 4367 unsigned long usage; 4368 int i, j, size, entries; 4369 4370 mutex_lock(&memcg->thresholds_lock); 4371 4372 if (type == _MEM) { 4373 thresholds = &memcg->thresholds; 4374 usage = mem_cgroup_usage(memcg, false); 4375 } else if (type == _MEMSWAP) { 4376 thresholds = &memcg->memsw_thresholds; 4377 usage = mem_cgroup_usage(memcg, true); 4378 } else 4379 BUG(); 4380 4381 if (!thresholds->primary) 4382 goto unlock; 4383 4384 /* Check if a threshold crossed before removing */ 4385 __mem_cgroup_threshold(memcg, type == _MEMSWAP); 4386 4387 /* Calculate new number of threshold */ 4388 size = entries = 0; 4389 for (i = 0; i < thresholds->primary->size; i++) { 4390 if (thresholds->primary->entries[i].eventfd != eventfd) 4391 size++; 4392 else 4393 entries++; 4394 } 4395 4396 new = thresholds->spare; 4397 4398 /* If no items related to eventfd have been cleared, nothing to do */ 4399 if (!entries) 4400 goto unlock; 4401 4402 /* Set thresholds array to NULL if we don't have thresholds */ 4403 if (!size) { 4404 kfree(new); 4405 new = NULL; 4406 goto swap_buffers; 4407 } 4408 4409 new->size = size; 4410 4411 /* Copy thresholds and find current threshold */ 4412 new->current_threshold = -1; 4413 for (i = 0, j = 0; i < thresholds->primary->size; i++) { 4414 if (thresholds->primary->entries[i].eventfd == eventfd) 4415 continue; 4416 4417 new->entries[j] = thresholds->primary->entries[i]; 4418 if 
(new->entries[j].threshold <= usage) { 4419 /* 4420 * new->current_threshold will not be used 4421 * until rcu_assign_pointer(), so it's safe to increment 4422 * it here. 4423 */ 4424 ++new->current_threshold; 4425 } 4426 j++; 4427 } 4428 4429 swap_buffers: 4430 /* Swap primary and spare array */ 4431 thresholds->spare = thresholds->primary; 4432 4433 rcu_assign_pointer(thresholds->primary, new); 4434 4435 /* To be sure that nobody uses thresholds */ 4436 synchronize_rcu(); 4437 4438 /* If all events are unregistered, free the spare array */ 4439 if (!new) { 4440 kfree(thresholds->spare); 4441 thresholds->spare = NULL; 4442 } 4443 unlock: 4444 mutex_unlock(&memcg->thresholds_lock); 4445 } 4446 4447 static void mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg, 4448 struct eventfd_ctx *eventfd) 4449 { 4450 return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEM); 4451 } 4452 4453 static void memsw_cgroup_usage_unregister_event(struct mem_cgroup *memcg, 4454 struct eventfd_ctx *eventfd) 4455 { 4456 return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEMSWAP); 4457 } 4458 4459 static int mem_cgroup_oom_register_event(struct mem_cgroup *memcg, 4460 struct eventfd_ctx *eventfd, const char *args) 4461 { 4462 struct mem_cgroup_eventfd_list *event; 4463 4464 event = kmalloc(sizeof(*event), GFP_KERNEL); 4465 if (!event) 4466 return -ENOMEM; 4467 4468 spin_lock(&memcg_oom_lock); 4469 4470 event->eventfd = eventfd; 4471 list_add(&event->list, &memcg->oom_notify); 4472 4473 /* already in OOM ? */ 4474 if (memcg->under_oom) 4475 eventfd_signal(eventfd, 1); 4476 spin_unlock(&memcg_oom_lock); 4477 4478 return 0; 4479 } 4480 4481 static void mem_cgroup_oom_unregister_event(struct mem_cgroup *memcg, 4482 struct eventfd_ctx *eventfd) 4483 { 4484 struct mem_cgroup_eventfd_list *ev, *tmp; 4485 4486 spin_lock(&memcg_oom_lock); 4487 4488 list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) { 4489 if (ev->eventfd == eventfd) { 4490 list_del(&ev->list); 4491 kfree(ev); 4492 } 4493 } 4494 4495 spin_unlock(&memcg_oom_lock); 4496 } 4497 4498 static int mem_cgroup_oom_control_read(struct seq_file *sf, void *v) 4499 { 4500 struct mem_cgroup *memcg = mem_cgroup_from_seq(sf); 4501 4502 seq_printf(sf, "oom_kill_disable %d\n", memcg->oom_kill_disable); 4503 seq_printf(sf, "under_oom %d\n", (bool)memcg->under_oom); 4504 seq_printf(sf, "oom_kill %lu\n", 4505 atomic_long_read(&memcg->memory_events[MEMCG_OOM_KILL])); 4506 return 0; 4507 } 4508 4509 static int mem_cgroup_oom_control_write(struct cgroup_subsys_state *css, 4510 struct cftype *cft, u64 val) 4511 { 4512 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 4513 4514 /* cannot set to root cgroup and only 0 and 1 are allowed */ 4515 if (!css->parent || !((val == 0) || (val == 1))) 4516 return -EINVAL; 4517 4518 memcg->oom_kill_disable = val; 4519 if (!val) 4520 memcg_oom_recover(memcg); 4521 4522 return 0; 4523 } 4524 4525 #ifdef CONFIG_CGROUP_WRITEBACK 4526 4527 #include <trace/events/writeback.h> 4528 4529 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp) 4530 { 4531 return wb_domain_init(&memcg->cgwb_domain, gfp); 4532 } 4533 4534 static void memcg_wb_domain_exit(struct mem_cgroup *memcg) 4535 { 4536 wb_domain_exit(&memcg->cgwb_domain); 4537 } 4538 4539 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg) 4540 { 4541 wb_domain_size_changed(&memcg->cgwb_domain); 4542 } 4543 4544 struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb) 4545 { 4546 struct mem_cgroup *memcg = 
mem_cgroup_from_css(wb->memcg_css); 4547 4548 if (!memcg->css.parent) 4549 return NULL; 4550 4551 return &memcg->cgwb_domain; 4552 } 4553 4554 /* 4555 * idx can be of type enum memcg_stat_item or node_stat_item. 4556 * Keep in sync with memcg_exact_page(). 4557 */ 4558 static unsigned long memcg_exact_page_state(struct mem_cgroup *memcg, int idx) 4559 { 4560 long x = atomic_long_read(&memcg->vmstats[idx]); 4561 int cpu; 4562 4563 for_each_online_cpu(cpu) 4564 x += per_cpu_ptr(memcg->vmstats_percpu, cpu)->stat[idx]; 4565 if (x < 0) 4566 x = 0; 4567 return x; 4568 } 4569 4570 /** 4571 * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg 4572 * @wb: bdi_writeback in question 4573 * @pfilepages: out parameter for number of file pages 4574 * @pheadroom: out parameter for number of allocatable pages according to memcg 4575 * @pdirty: out parameter for number of dirty pages 4576 * @pwriteback: out parameter for number of pages under writeback 4577 * 4578 * Determine the numbers of file, headroom, dirty, and writeback pages in 4579 * @wb's memcg. File, dirty and writeback are self-explanatory. Headroom 4580 * is a bit more involved. 4581 * 4582 * A memcg's headroom is "min(max, high) - used". In the hierarchy, the 4583 * headroom is calculated as the lowest headroom of itself and the 4584 * ancestors. Note that this doesn't consider the actual amount of 4585 * available memory in the system. The caller should further cap 4586 * *@pheadroom accordingly. 4587 */ 4588 void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages, 4589 unsigned long *pheadroom, unsigned long *pdirty, 4590 unsigned long *pwriteback) 4591 { 4592 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css); 4593 struct mem_cgroup *parent; 4594 4595 *pdirty = memcg_exact_page_state(memcg, NR_FILE_DIRTY); 4596 4597 *pwriteback = memcg_exact_page_state(memcg, NR_WRITEBACK); 4598 *pfilepages = memcg_exact_page_state(memcg, NR_INACTIVE_FILE) + 4599 memcg_exact_page_state(memcg, NR_ACTIVE_FILE); 4600 *pheadroom = PAGE_COUNTER_MAX; 4601 4602 while ((parent = parent_mem_cgroup(memcg))) { 4603 unsigned long ceiling = min(READ_ONCE(memcg->memory.max), 4604 READ_ONCE(memcg->memory.high)); 4605 unsigned long used = page_counter_read(&memcg->memory); 4606 4607 *pheadroom = min(*pheadroom, ceiling - min(ceiling, used)); 4608 memcg = parent; 4609 } 4610 } 4611 4612 /* 4613 * Foreign dirty flushing 4614 * 4615 * There's an inherent mismatch between memcg and writeback. The former 4616 * trackes ownership per-page while the latter per-inode. This was a 4617 * deliberate design decision because honoring per-page ownership in the 4618 * writeback path is complicated, may lead to higher CPU and IO overheads 4619 * and deemed unnecessary given that write-sharing an inode across 4620 * different cgroups isn't a common use-case. 4621 * 4622 * Combined with inode majority-writer ownership switching, this works well 4623 * enough in most cases but there are some pathological cases. For 4624 * example, let's say there are two cgroups A and B which keep writing to 4625 * different but confined parts of the same inode. B owns the inode and 4626 * A's memory is limited far below B's. A's dirty ratio can rise enough to 4627 * trigger balance_dirty_pages() sleeps but B's can be low enough to avoid 4628 * triggering background writeback. A will be slowed down without a way to 4629 * make writeback of the dirty pages happen. 
4630 *
4631 * Conditions like the above can lead to a cgroup getting repeatedly and
4632 * severely throttled after making some progress after each
4633 * dirty_expire_interval while the underlying IO device is almost
4634 * completely idle.
4635 *
4636 * Solving this problem completely requires matching the ownership tracking
4637 * granularities between memcg and writeback in either direction. However,
4638 * the more egregious behaviors can be avoided by simply remembering the
4639 * most recent foreign dirtying events and initiating remote flushes on
4640 * them when local writeback isn't enough to keep the memory clean enough.
4641 *
4642 * The following two functions implement such a mechanism. When a foreign
4643 * page - a page whose memcg and writeback ownerships don't match - is
4644 * dirtied, mem_cgroup_track_foreign_dirty() records the inode owning
4645 * bdi_writeback on the page owning memcg. When balance_dirty_pages()
4646 * decides that the memcg needs to sleep due to high dirty ratio, it calls
4647 * mem_cgroup_flush_foreign() which queues writeback on the recorded
4648 * foreign bdi_writebacks which haven't expired. Both the numbers of
4649 * recorded bdi_writebacks and concurrent in-flight foreign writebacks are
4650 * limited to MEMCG_CGWB_FRN_CNT.
4651 *
4652 * The mechanism only remembers IDs and doesn't hold any object references.
4653 * As being wrong occasionally doesn't matter, updates and accesses to the
4654 * records are lockless and racy.
4655 */
4656 void mem_cgroup_track_foreign_dirty_slowpath(struct page *page,
4657 struct bdi_writeback *wb)
4658 {
4659 struct mem_cgroup *memcg = page_memcg(page);
4660 struct memcg_cgwb_frn *frn;
4661 u64 now = get_jiffies_64();
4662 u64 oldest_at = now;
4663 int oldest = -1;
4664 int i;
4665
4666 trace_track_foreign_dirty(page, wb);
4667
4668 /*
4669 * Pick the slot to use. If there is already a slot for @wb, keep
4670 * using it. If not, replace the oldest one which isn't being
4671 * written out.
4672 */
4673 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) {
4674 frn = &memcg->cgwb_frn[i];
4675 if (frn->bdi_id == wb->bdi->id &&
4676 frn->memcg_id == wb->memcg_css->id)
4677 break;
4678 if (time_before64(frn->at, oldest_at) &&
4679 atomic_read(&frn->done.cnt) == 1) {
4680 oldest = i;
4681 oldest_at = frn->at;
4682 }
4683 }
4684
4685 if (i < MEMCG_CGWB_FRN_CNT) {
4686 /*
4687 * Re-using an existing one. Update timestamp lazily to
4688 * avoid making the cacheline hot. We want them to be
4689 * reasonably up-to-date and significantly shorter than
4690 * dirty_expire_interval as that's what expires the record.
4691 * Use the shorter of 1s and dirty_expire_interval / 8.
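* (As a rough worked example, assuming the common default
* dirty_expire_interval of 30 seconds: 30s / 8 is ~3.75s, so the
* update_intv computed below works out to min(1s, ~3.75s) = 1 second.)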
4692 */ 4693 unsigned long update_intv = 4694 min_t(unsigned long, HZ, 4695 msecs_to_jiffies(dirty_expire_interval * 10) / 8); 4696 4697 if (time_before64(frn->at, now - update_intv)) 4698 frn->at = now; 4699 } else if (oldest >= 0) { 4700 /* replace the oldest free one */ 4701 frn = &memcg->cgwb_frn[oldest]; 4702 frn->bdi_id = wb->bdi->id; 4703 frn->memcg_id = wb->memcg_css->id; 4704 frn->at = now; 4705 } 4706 } 4707 4708 /* issue foreign writeback flushes for recorded foreign dirtying events */ 4709 void mem_cgroup_flush_foreign(struct bdi_writeback *wb) 4710 { 4711 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css); 4712 unsigned long intv = msecs_to_jiffies(dirty_expire_interval * 10); 4713 u64 now = jiffies_64; 4714 int i; 4715 4716 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) { 4717 struct memcg_cgwb_frn *frn = &memcg->cgwb_frn[i]; 4718 4719 /* 4720 * If the record is older than dirty_expire_interval, 4721 * writeback on it has already started. No need to kick it 4722 * off again. Also, don't start a new one if there's 4723 * already one in flight. 4724 */ 4725 if (time_after64(frn->at, now - intv) && 4726 atomic_read(&frn->done.cnt) == 1) { 4727 frn->at = 0; 4728 trace_flush_foreign(wb, frn->bdi_id, frn->memcg_id); 4729 cgroup_writeback_by_id(frn->bdi_id, frn->memcg_id, 0, 4730 WB_REASON_FOREIGN_FLUSH, 4731 &frn->done); 4732 } 4733 } 4734 } 4735 4736 #else /* CONFIG_CGROUP_WRITEBACK */ 4737 4738 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp) 4739 { 4740 return 0; 4741 } 4742 4743 static void memcg_wb_domain_exit(struct mem_cgroup *memcg) 4744 { 4745 } 4746 4747 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg) 4748 { 4749 } 4750 4751 #endif /* CONFIG_CGROUP_WRITEBACK */ 4752 4753 /* 4754 * DO NOT USE IN NEW FILES. 4755 * 4756 * "cgroup.event_control" implementation. 4757 * 4758 * This is way over-engineered. It tries to support fully configurable 4759 * events for each user. Such level of flexibility is completely 4760 * unnecessary especially in the light of the planned unified hierarchy. 4761 * 4762 * Please deprecate this and replace with something simpler if at all 4763 * possible. 4764 */ 4765 4766 /* 4767 * Unregister event and free resources. 4768 * 4769 * Gets called from workqueue. 4770 */ 4771 static void memcg_event_remove(struct work_struct *work) 4772 { 4773 struct mem_cgroup_event *event = 4774 container_of(work, struct mem_cgroup_event, remove); 4775 struct mem_cgroup *memcg = event->memcg; 4776 4777 remove_wait_queue(event->wqh, &event->wait); 4778 4779 event->unregister_event(memcg, event->eventfd); 4780 4781 /* Notify userspace the event is going away. */ 4782 eventfd_signal(event->eventfd, 1); 4783 4784 eventfd_ctx_put(event->eventfd); 4785 kfree(event); 4786 css_put(&memcg->css); 4787 } 4788 4789 /* 4790 * Gets called on EPOLLHUP on eventfd when user closes it. 4791 * 4792 * Called with wqh->lock held and interrupts disabled. 4793 */ 4794 static int memcg_event_wake(wait_queue_entry_t *wait, unsigned mode, 4795 int sync, void *key) 4796 { 4797 struct mem_cgroup_event *event = 4798 container_of(wait, struct mem_cgroup_event, wait); 4799 struct mem_cgroup *memcg = event->memcg; 4800 __poll_t flags = key_to_poll(key); 4801 4802 if (flags & EPOLLHUP) { 4803 /* 4804 * If the event has been detached at cgroup removal, we 4805 * can simply return knowing the other side will cleanup 4806 * for us. 
4807 * 4808 * We can't race against event freeing since the other 4809 * side will require wqh->lock via remove_wait_queue(), 4810 * which we hold. 4811 */ 4812 spin_lock(&memcg->event_list_lock); 4813 if (!list_empty(&event->list)) { 4814 list_del_init(&event->list); 4815 /* 4816 * We are in atomic context, but cgroup_event_remove() 4817 * may sleep, so we have to call it in workqueue. 4818 */ 4819 schedule_work(&event->remove); 4820 } 4821 spin_unlock(&memcg->event_list_lock); 4822 } 4823 4824 return 0; 4825 } 4826 4827 static void memcg_event_ptable_queue_proc(struct file *file, 4828 wait_queue_head_t *wqh, poll_table *pt) 4829 { 4830 struct mem_cgroup_event *event = 4831 container_of(pt, struct mem_cgroup_event, pt); 4832 4833 event->wqh = wqh; 4834 add_wait_queue(wqh, &event->wait); 4835 } 4836 4837 /* 4838 * DO NOT USE IN NEW FILES. 4839 * 4840 * Parse input and register new cgroup event handler. 4841 * 4842 * Input must be in format '<event_fd> <control_fd> <args>'. 4843 * Interpretation of args is defined by control file implementation. 4844 */ 4845 static ssize_t memcg_write_event_control(struct kernfs_open_file *of, 4846 char *buf, size_t nbytes, loff_t off) 4847 { 4848 struct cgroup_subsys_state *css = of_css(of); 4849 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 4850 struct mem_cgroup_event *event; 4851 struct cgroup_subsys_state *cfile_css; 4852 unsigned int efd, cfd; 4853 struct fd efile; 4854 struct fd cfile; 4855 const char *name; 4856 char *endp; 4857 int ret; 4858 4859 buf = strstrip(buf); 4860 4861 efd = simple_strtoul(buf, &endp, 10); 4862 if (*endp != ' ') 4863 return -EINVAL; 4864 buf = endp + 1; 4865 4866 cfd = simple_strtoul(buf, &endp, 10); 4867 if ((*endp != ' ') && (*endp != '\0')) 4868 return -EINVAL; 4869 buf = endp + 1; 4870 4871 event = kzalloc(sizeof(*event), GFP_KERNEL); 4872 if (!event) 4873 return -ENOMEM; 4874 4875 event->memcg = memcg; 4876 INIT_LIST_HEAD(&event->list); 4877 init_poll_funcptr(&event->pt, memcg_event_ptable_queue_proc); 4878 init_waitqueue_func_entry(&event->wait, memcg_event_wake); 4879 INIT_WORK(&event->remove, memcg_event_remove); 4880 4881 efile = fdget(efd); 4882 if (!efile.file) { 4883 ret = -EBADF; 4884 goto out_kfree; 4885 } 4886 4887 event->eventfd = eventfd_ctx_fileget(efile.file); 4888 if (IS_ERR(event->eventfd)) { 4889 ret = PTR_ERR(event->eventfd); 4890 goto out_put_efile; 4891 } 4892 4893 cfile = fdget(cfd); 4894 if (!cfile.file) { 4895 ret = -EBADF; 4896 goto out_put_eventfd; 4897 } 4898 4899 /* the process need read permission on control file */ 4900 /* AV: shouldn't we check that it's been opened for read instead? */ 4901 ret = inode_permission(file_inode(cfile.file), MAY_READ); 4902 if (ret < 0) 4903 goto out_put_cfile; 4904 4905 /* 4906 * Determine the event callbacks and set them in @event. This used 4907 * to be done via struct cftype but cgroup core no longer knows 4908 * about these events. The following is crude but the whole thing 4909 * is for compatibility anyway. 4910 * 4911 * DO NOT ADD NEW FILES. 
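* (For reference, registration from userspace is roughly: create an
* eventfd, open one of the legacy control files handled below, then write
* "<event_fd> <control_fd> <args>" to this cgroup's cgroup.event_control
* file, as described in the format comment above this function.)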
4912 */ 4913 name = cfile.file->f_path.dentry->d_name.name; 4914 4915 if (!strcmp(name, "memory.usage_in_bytes")) { 4916 event->register_event = mem_cgroup_usage_register_event; 4917 event->unregister_event = mem_cgroup_usage_unregister_event; 4918 } else if (!strcmp(name, "memory.oom_control")) { 4919 event->register_event = mem_cgroup_oom_register_event; 4920 event->unregister_event = mem_cgroup_oom_unregister_event; 4921 } else if (!strcmp(name, "memory.pressure_level")) { 4922 event->register_event = vmpressure_register_event; 4923 event->unregister_event = vmpressure_unregister_event; 4924 } else if (!strcmp(name, "memory.memsw.usage_in_bytes")) { 4925 event->register_event = memsw_cgroup_usage_register_event; 4926 event->unregister_event = memsw_cgroup_usage_unregister_event; 4927 } else { 4928 ret = -EINVAL; 4929 goto out_put_cfile; 4930 } 4931 4932 /* 4933 * Verify @cfile should belong to @css. Also, remaining events are 4934 * automatically removed on cgroup destruction but the removal is 4935 * asynchronous, so take an extra ref on @css. 4936 */ 4937 cfile_css = css_tryget_online_from_dir(cfile.file->f_path.dentry->d_parent, 4938 &memory_cgrp_subsys); 4939 ret = -EINVAL; 4940 if (IS_ERR(cfile_css)) 4941 goto out_put_cfile; 4942 if (cfile_css != css) { 4943 css_put(cfile_css); 4944 goto out_put_cfile; 4945 } 4946 4947 ret = event->register_event(memcg, event->eventfd, buf); 4948 if (ret) 4949 goto out_put_css; 4950 4951 vfs_poll(efile.file, &event->pt); 4952 4953 spin_lock(&memcg->event_list_lock); 4954 list_add(&event->list, &memcg->event_list); 4955 spin_unlock(&memcg->event_list_lock); 4956 4957 fdput(cfile); 4958 fdput(efile); 4959 4960 return nbytes; 4961 4962 out_put_css: 4963 css_put(css); 4964 out_put_cfile: 4965 fdput(cfile); 4966 out_put_eventfd: 4967 eventfd_ctx_put(event->eventfd); 4968 out_put_efile: 4969 fdput(efile); 4970 out_kfree: 4971 kfree(event); 4972 4973 return ret; 4974 } 4975 4976 static struct cftype mem_cgroup_legacy_files[] = { 4977 { 4978 .name = "usage_in_bytes", 4979 .private = MEMFILE_PRIVATE(_MEM, RES_USAGE), 4980 .read_u64 = mem_cgroup_read_u64, 4981 }, 4982 { 4983 .name = "max_usage_in_bytes", 4984 .private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE), 4985 .write = mem_cgroup_reset, 4986 .read_u64 = mem_cgroup_read_u64, 4987 }, 4988 { 4989 .name = "limit_in_bytes", 4990 .private = MEMFILE_PRIVATE(_MEM, RES_LIMIT), 4991 .write = mem_cgroup_write, 4992 .read_u64 = mem_cgroup_read_u64, 4993 }, 4994 { 4995 .name = "soft_limit_in_bytes", 4996 .private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT), 4997 .write = mem_cgroup_write, 4998 .read_u64 = mem_cgroup_read_u64, 4999 }, 5000 { 5001 .name = "failcnt", 5002 .private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT), 5003 .write = mem_cgroup_reset, 5004 .read_u64 = mem_cgroup_read_u64, 5005 }, 5006 { 5007 .name = "stat", 5008 .seq_show = memcg_stat_show, 5009 }, 5010 { 5011 .name = "force_empty", 5012 .write = mem_cgroup_force_empty_write, 5013 }, 5014 { 5015 .name = "use_hierarchy", 5016 .write_u64 = mem_cgroup_hierarchy_write, 5017 .read_u64 = mem_cgroup_hierarchy_read, 5018 }, 5019 { 5020 .name = "cgroup.event_control", /* XXX: for compat */ 5021 .write = memcg_write_event_control, 5022 .flags = CFTYPE_NO_PREFIX | CFTYPE_WORLD_WRITABLE, 5023 }, 5024 { 5025 .name = "swappiness", 5026 .read_u64 = mem_cgroup_swappiness_read, 5027 .write_u64 = mem_cgroup_swappiness_write, 5028 }, 5029 { 5030 .name = "move_charge_at_immigrate", 5031 .read_u64 = mem_cgroup_move_charge_read, 5032 .write_u64 = mem_cgroup_move_charge_write, 5033 
}, 5034 { 5035 .name = "oom_control", 5036 .seq_show = mem_cgroup_oom_control_read, 5037 .write_u64 = mem_cgroup_oom_control_write, 5038 .private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL), 5039 }, 5040 { 5041 .name = "pressure_level", 5042 }, 5043 #ifdef CONFIG_NUMA 5044 { 5045 .name = "numa_stat", 5046 .seq_show = memcg_numa_stat_show, 5047 }, 5048 #endif 5049 { 5050 .name = "kmem.limit_in_bytes", 5051 .private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT), 5052 .write = mem_cgroup_write, 5053 .read_u64 = mem_cgroup_read_u64, 5054 }, 5055 { 5056 .name = "kmem.usage_in_bytes", 5057 .private = MEMFILE_PRIVATE(_KMEM, RES_USAGE), 5058 .read_u64 = mem_cgroup_read_u64, 5059 }, 5060 { 5061 .name = "kmem.failcnt", 5062 .private = MEMFILE_PRIVATE(_KMEM, RES_FAILCNT), 5063 .write = mem_cgroup_reset, 5064 .read_u64 = mem_cgroup_read_u64, 5065 }, 5066 { 5067 .name = "kmem.max_usage_in_bytes", 5068 .private = MEMFILE_PRIVATE(_KMEM, RES_MAX_USAGE), 5069 .write = mem_cgroup_reset, 5070 .read_u64 = mem_cgroup_read_u64, 5071 }, 5072 #if defined(CONFIG_MEMCG_KMEM) && \ 5073 (defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)) 5074 { 5075 .name = "kmem.slabinfo", 5076 .seq_show = memcg_slab_show, 5077 }, 5078 #endif 5079 { 5080 .name = "kmem.tcp.limit_in_bytes", 5081 .private = MEMFILE_PRIVATE(_TCP, RES_LIMIT), 5082 .write = mem_cgroup_write, 5083 .read_u64 = mem_cgroup_read_u64, 5084 }, 5085 { 5086 .name = "kmem.tcp.usage_in_bytes", 5087 .private = MEMFILE_PRIVATE(_TCP, RES_USAGE), 5088 .read_u64 = mem_cgroup_read_u64, 5089 }, 5090 { 5091 .name = "kmem.tcp.failcnt", 5092 .private = MEMFILE_PRIVATE(_TCP, RES_FAILCNT), 5093 .write = mem_cgroup_reset, 5094 .read_u64 = mem_cgroup_read_u64, 5095 }, 5096 { 5097 .name = "kmem.tcp.max_usage_in_bytes", 5098 .private = MEMFILE_PRIVATE(_TCP, RES_MAX_USAGE), 5099 .write = mem_cgroup_reset, 5100 .read_u64 = mem_cgroup_read_u64, 5101 }, 5102 { }, /* terminate */ 5103 }; 5104 5105 /* 5106 * Private memory cgroup IDR 5107 * 5108 * Swap-out records and page cache shadow entries need to store memcg 5109 * references in constrained space, so we maintain an ID space that is 5110 * limited to 16 bit (MEM_CGROUP_ID_MAX), limiting the total number of 5111 * memory-controlled cgroups to 64k. 5112 * 5113 * However, there usually are many references to the offline CSS after 5114 * the cgroup has been destroyed, such as page cache or reclaimable 5115 * slab objects, that don't need to hang on to the ID. We want to keep 5116 * those dead CSS from occupying IDs, or we might quickly exhaust the 5117 * relatively small ID space and prevent the creation of new cgroups 5118 * even when there are much fewer than 64k cgroups - possibly none. 5119 * 5120 * Maintain a private 16-bit ID space for memcg, and allow the ID to 5121 * be freed and recycled when it's no longer needed, which is usually 5122 * when the CSS is offlined. 5123 * 5124 * The only exception to that are records of swapped out tmpfs/shmem 5125 * pages that need to be attributed to live ancestors on swapin. But 5126 * those references are manageable from userspace. 
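* (In the code below, the ID is allocated in mem_cgroup_alloc(), pinned by
* the online state in mem_cgroup_css_online(), and released again through
* mem_cgroup_id_put() from mem_cgroup_css_offline(); holders of longer-lived
* references drop them with mem_cgroup_id_put_many(), as the moved-swap
* handling in __mem_cgroup_clear_mc() does.)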
5127 */ 5128 5129 static DEFINE_IDR(mem_cgroup_idr); 5130 5131 static void mem_cgroup_id_remove(struct mem_cgroup *memcg) 5132 { 5133 if (memcg->id.id > 0) { 5134 idr_remove(&mem_cgroup_idr, memcg->id.id); 5135 memcg->id.id = 0; 5136 } 5137 } 5138 5139 static void __maybe_unused mem_cgroup_id_get_many(struct mem_cgroup *memcg, 5140 unsigned int n) 5141 { 5142 refcount_add(n, &memcg->id.ref); 5143 } 5144 5145 static void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n) 5146 { 5147 if (refcount_sub_and_test(n, &memcg->id.ref)) { 5148 mem_cgroup_id_remove(memcg); 5149 5150 /* Memcg ID pins CSS */ 5151 css_put(&memcg->css); 5152 } 5153 } 5154 5155 static inline void mem_cgroup_id_put(struct mem_cgroup *memcg) 5156 { 5157 mem_cgroup_id_put_many(memcg, 1); 5158 } 5159 5160 /** 5161 * mem_cgroup_from_id - look up a memcg from a memcg id 5162 * @id: the memcg id to look up 5163 * 5164 * Caller must hold rcu_read_lock(). 5165 */ 5166 struct mem_cgroup *mem_cgroup_from_id(unsigned short id) 5167 { 5168 WARN_ON_ONCE(!rcu_read_lock_held()); 5169 return idr_find(&mem_cgroup_idr, id); 5170 } 5171 5172 static int alloc_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node) 5173 { 5174 struct mem_cgroup_per_node *pn; 5175 int tmp = node; 5176 /* 5177 * This routine is called against possible nodes. 5178 * But it's BUG to call kmalloc() against offline node. 5179 * 5180 * TODO: this routine can waste much memory for nodes which will 5181 * never be onlined. It's better to use memory hotplug callback 5182 * function. 5183 */ 5184 if (!node_state(node, N_NORMAL_MEMORY)) 5185 tmp = -1; 5186 pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, tmp); 5187 if (!pn) 5188 return 1; 5189 5190 pn->lruvec_stat_local = alloc_percpu_gfp(struct lruvec_stat, 5191 GFP_KERNEL_ACCOUNT); 5192 if (!pn->lruvec_stat_local) { 5193 kfree(pn); 5194 return 1; 5195 } 5196 5197 pn->lruvec_stat_cpu = alloc_percpu_gfp(struct lruvec_stat, 5198 GFP_KERNEL_ACCOUNT); 5199 if (!pn->lruvec_stat_cpu) { 5200 free_percpu(pn->lruvec_stat_local); 5201 kfree(pn); 5202 return 1; 5203 } 5204 5205 lruvec_init(&pn->lruvec); 5206 pn->usage_in_excess = 0; 5207 pn->on_tree = false; 5208 pn->memcg = memcg; 5209 5210 memcg->nodeinfo[node] = pn; 5211 return 0; 5212 } 5213 5214 static void free_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node) 5215 { 5216 struct mem_cgroup_per_node *pn = memcg->nodeinfo[node]; 5217 5218 if (!pn) 5219 return; 5220 5221 free_percpu(pn->lruvec_stat_cpu); 5222 free_percpu(pn->lruvec_stat_local); 5223 kfree(pn); 5224 } 5225 5226 static void __mem_cgroup_free(struct mem_cgroup *memcg) 5227 { 5228 int node; 5229 5230 for_each_node(node) 5231 free_mem_cgroup_per_node_info(memcg, node); 5232 free_percpu(memcg->vmstats_percpu); 5233 free_percpu(memcg->vmstats_local); 5234 kfree(memcg); 5235 } 5236 5237 static void mem_cgroup_free(struct mem_cgroup *memcg) 5238 { 5239 memcg_wb_domain_exit(memcg); 5240 /* 5241 * Flush percpu vmstats and vmevents to guarantee the value correctness 5242 * on parent's and all ancestor levels. 
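* (Otherwise, whatever deltas are still batched in this memcg's per-cpu
* counters would be lost when it is freed and the totals kept at its
* ancestors would drift.)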
5243 */ 5244 memcg_flush_percpu_vmstats(memcg); 5245 memcg_flush_percpu_vmevents(memcg); 5246 __mem_cgroup_free(memcg); 5247 } 5248 5249 static struct mem_cgroup *mem_cgroup_alloc(void) 5250 { 5251 struct mem_cgroup *memcg; 5252 unsigned int size; 5253 int node; 5254 int __maybe_unused i; 5255 long error = -ENOMEM; 5256 5257 size = sizeof(struct mem_cgroup); 5258 size += nr_node_ids * sizeof(struct mem_cgroup_per_node *); 5259 5260 memcg = kzalloc(size, GFP_KERNEL); 5261 if (!memcg) 5262 return ERR_PTR(error); 5263 5264 memcg->id.id = idr_alloc(&mem_cgroup_idr, NULL, 5265 1, MEM_CGROUP_ID_MAX, 5266 GFP_KERNEL); 5267 if (memcg->id.id < 0) { 5268 error = memcg->id.id; 5269 goto fail; 5270 } 5271 5272 memcg->vmstats_local = alloc_percpu_gfp(struct memcg_vmstats_percpu, 5273 GFP_KERNEL_ACCOUNT); 5274 if (!memcg->vmstats_local) 5275 goto fail; 5276 5277 memcg->vmstats_percpu = alloc_percpu_gfp(struct memcg_vmstats_percpu, 5278 GFP_KERNEL_ACCOUNT); 5279 if (!memcg->vmstats_percpu) 5280 goto fail; 5281 5282 for_each_node(node) 5283 if (alloc_mem_cgroup_per_node_info(memcg, node)) 5284 goto fail; 5285 5286 if (memcg_wb_domain_init(memcg, GFP_KERNEL)) 5287 goto fail; 5288 5289 INIT_WORK(&memcg->high_work, high_work_func); 5290 INIT_LIST_HEAD(&memcg->oom_notify); 5291 mutex_init(&memcg->thresholds_lock); 5292 spin_lock_init(&memcg->move_lock); 5293 vmpressure_init(&memcg->vmpressure); 5294 INIT_LIST_HEAD(&memcg->event_list); 5295 spin_lock_init(&memcg->event_list_lock); 5296 memcg->socket_pressure = jiffies; 5297 #ifdef CONFIG_MEMCG_KMEM 5298 memcg->kmemcg_id = -1; 5299 INIT_LIST_HEAD(&memcg->objcg_list); 5300 #endif 5301 #ifdef CONFIG_CGROUP_WRITEBACK 5302 INIT_LIST_HEAD(&memcg->cgwb_list); 5303 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) 5304 memcg->cgwb_frn[i].done = 5305 __WB_COMPLETION_INIT(&memcg_cgwb_frn_waitq); 5306 #endif 5307 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 5308 spin_lock_init(&memcg->deferred_split_queue.split_queue_lock); 5309 INIT_LIST_HEAD(&memcg->deferred_split_queue.split_queue); 5310 memcg->deferred_split_queue.split_queue_len = 0; 5311 #endif 5312 idr_replace(&mem_cgroup_idr, memcg, memcg->id.id); 5313 return memcg; 5314 fail: 5315 mem_cgroup_id_remove(memcg); 5316 __mem_cgroup_free(memcg); 5317 return ERR_PTR(error); 5318 } 5319 5320 static struct cgroup_subsys_state * __ref 5321 mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css) 5322 { 5323 struct mem_cgroup *parent = mem_cgroup_from_css(parent_css); 5324 struct mem_cgroup *memcg, *old_memcg; 5325 long error = -ENOMEM; 5326 5327 old_memcg = set_active_memcg(parent); 5328 memcg = mem_cgroup_alloc(); 5329 set_active_memcg(old_memcg); 5330 if (IS_ERR(memcg)) 5331 return ERR_CAST(memcg); 5332 5333 page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX); 5334 memcg->soft_limit = PAGE_COUNTER_MAX; 5335 page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX); 5336 if (parent) { 5337 memcg->swappiness = mem_cgroup_swappiness(parent); 5338 memcg->oom_kill_disable = parent->oom_kill_disable; 5339 } 5340 if (!parent) { 5341 page_counter_init(&memcg->memory, NULL); 5342 page_counter_init(&memcg->swap, NULL); 5343 page_counter_init(&memcg->kmem, NULL); 5344 page_counter_init(&memcg->tcpmem, NULL); 5345 } else if (parent->use_hierarchy) { 5346 memcg->use_hierarchy = true; 5347 page_counter_init(&memcg->memory, &parent->memory); 5348 page_counter_init(&memcg->swap, &parent->swap); 5349 page_counter_init(&memcg->kmem, &parent->kmem); 5350 page_counter_init(&memcg->tcpmem, &parent->tcpmem); 5351 } else { 5352 
page_counter_init(&memcg->memory, &root_mem_cgroup->memory); 5353 page_counter_init(&memcg->swap, &root_mem_cgroup->swap); 5354 page_counter_init(&memcg->kmem, &root_mem_cgroup->kmem); 5355 page_counter_init(&memcg->tcpmem, &root_mem_cgroup->tcpmem); 5356 /* 5357 * Deeper hierachy with use_hierarchy == false doesn't make 5358 * much sense so let cgroup subsystem know about this 5359 * unfortunate state in our controller. 5360 */ 5361 if (parent != root_mem_cgroup) 5362 memory_cgrp_subsys.broken_hierarchy = true; 5363 } 5364 5365 /* The following stuff does not apply to the root */ 5366 if (!parent) { 5367 root_mem_cgroup = memcg; 5368 return &memcg->css; 5369 } 5370 5371 error = memcg_online_kmem(memcg); 5372 if (error) 5373 goto fail; 5374 5375 if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket) 5376 static_branch_inc(&memcg_sockets_enabled_key); 5377 5378 return &memcg->css; 5379 fail: 5380 mem_cgroup_id_remove(memcg); 5381 mem_cgroup_free(memcg); 5382 return ERR_PTR(error); 5383 } 5384 5385 static int mem_cgroup_css_online(struct cgroup_subsys_state *css) 5386 { 5387 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 5388 5389 /* 5390 * A memcg must be visible for memcg_expand_shrinker_maps() 5391 * by the time the maps are allocated. So, we allocate maps 5392 * here, when for_each_mem_cgroup() can't skip it. 5393 */ 5394 if (memcg_alloc_shrinker_maps(memcg)) { 5395 mem_cgroup_id_remove(memcg); 5396 return -ENOMEM; 5397 } 5398 5399 /* Online state pins memcg ID, memcg ID pins CSS */ 5400 refcount_set(&memcg->id.ref, 1); 5401 css_get(css); 5402 return 0; 5403 } 5404 5405 static void mem_cgroup_css_offline(struct cgroup_subsys_state *css) 5406 { 5407 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 5408 struct mem_cgroup_event *event, *tmp; 5409 5410 /* 5411 * Unregister events and notify userspace. 5412 * Notify userspace about cgroup removing only after rmdir of cgroup 5413 * directory to avoid race between userspace and kernelspace. 
5414 */ 5415 spin_lock(&memcg->event_list_lock); 5416 list_for_each_entry_safe(event, tmp, &memcg->event_list, list) { 5417 list_del_init(&event->list); 5418 schedule_work(&event->remove); 5419 } 5420 spin_unlock(&memcg->event_list_lock); 5421 5422 page_counter_set_min(&memcg->memory, 0); 5423 page_counter_set_low(&memcg->memory, 0); 5424 5425 memcg_offline_kmem(memcg); 5426 wb_memcg_offline(memcg); 5427 5428 drain_all_stock(memcg); 5429 5430 mem_cgroup_id_put(memcg); 5431 } 5432 5433 static void mem_cgroup_css_released(struct cgroup_subsys_state *css) 5434 { 5435 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 5436 5437 invalidate_reclaim_iterators(memcg); 5438 } 5439 5440 static void mem_cgroup_css_free(struct cgroup_subsys_state *css) 5441 { 5442 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 5443 int __maybe_unused i; 5444 5445 #ifdef CONFIG_CGROUP_WRITEBACK 5446 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) 5447 wb_wait_for_completion(&memcg->cgwb_frn[i].done); 5448 #endif 5449 if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket) 5450 static_branch_dec(&memcg_sockets_enabled_key); 5451 5452 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_active) 5453 static_branch_dec(&memcg_sockets_enabled_key); 5454 5455 vmpressure_cleanup(&memcg->vmpressure); 5456 cancel_work_sync(&memcg->high_work); 5457 mem_cgroup_remove_from_trees(memcg); 5458 memcg_free_shrinker_maps(memcg); 5459 memcg_free_kmem(memcg); 5460 mem_cgroup_free(memcg); 5461 } 5462 5463 /** 5464 * mem_cgroup_css_reset - reset the states of a mem_cgroup 5465 * @css: the target css 5466 * 5467 * Reset the states of the mem_cgroup associated with @css. This is 5468 * invoked when the userland requests disabling on the default hierarchy 5469 * but the memcg is pinned through dependency. The memcg should stop 5470 * applying policies and should revert to the vanilla state as it may be 5471 * made visible again. 5472 * 5473 * The current implementation only resets the essential configurations. 5474 * This needs to be expanded to cover all the visible parts. 5475 */ 5476 static void mem_cgroup_css_reset(struct cgroup_subsys_state *css) 5477 { 5478 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 5479 5480 page_counter_set_max(&memcg->memory, PAGE_COUNTER_MAX); 5481 page_counter_set_max(&memcg->swap, PAGE_COUNTER_MAX); 5482 page_counter_set_max(&memcg->kmem, PAGE_COUNTER_MAX); 5483 page_counter_set_max(&memcg->tcpmem, PAGE_COUNTER_MAX); 5484 page_counter_set_min(&memcg->memory, 0); 5485 page_counter_set_low(&memcg->memory, 0); 5486 page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX); 5487 memcg->soft_limit = PAGE_COUNTER_MAX; 5488 page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX); 5489 memcg_wb_domain_size_changed(memcg); 5490 } 5491 5492 #ifdef CONFIG_MMU 5493 /* Handlers for move charge at task migration. 
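* Charges are first "precharged" in bulk against mc.to (see
* mem_cgroup_do_precharge() below), then moved page by page while the
* target mm's page tables are walked; any leftover precharge is cancelled
* in __mem_cgroup_clear_mc().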
*/ 5494 static int mem_cgroup_do_precharge(unsigned long count) 5495 { 5496 int ret; 5497 5498 /* Try a single bulk charge without reclaim first, kswapd may wake */ 5499 ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_DIRECT_RECLAIM, count); 5500 if (!ret) { 5501 mc.precharge += count; 5502 return ret; 5503 } 5504 5505 /* Try charges one by one with reclaim, but do not retry */ 5506 while (count--) { 5507 ret = try_charge(mc.to, GFP_KERNEL | __GFP_NORETRY, 1); 5508 if (ret) 5509 return ret; 5510 mc.precharge++; 5511 cond_resched(); 5512 } 5513 return 0; 5514 } 5515 5516 union mc_target { 5517 struct page *page; 5518 swp_entry_t ent; 5519 }; 5520 5521 enum mc_target_type { 5522 MC_TARGET_NONE = 0, 5523 MC_TARGET_PAGE, 5524 MC_TARGET_SWAP, 5525 MC_TARGET_DEVICE, 5526 }; 5527 5528 static struct page *mc_handle_present_pte(struct vm_area_struct *vma, 5529 unsigned long addr, pte_t ptent) 5530 { 5531 struct page *page = vm_normal_page(vma, addr, ptent); 5532 5533 if (!page || !page_mapped(page)) 5534 return NULL; 5535 if (PageAnon(page)) { 5536 if (!(mc.flags & MOVE_ANON)) 5537 return NULL; 5538 } else { 5539 if (!(mc.flags & MOVE_FILE)) 5540 return NULL; 5541 } 5542 if (!get_page_unless_zero(page)) 5543 return NULL; 5544 5545 return page; 5546 } 5547 5548 #if defined(CONFIG_SWAP) || defined(CONFIG_DEVICE_PRIVATE) 5549 static struct page *mc_handle_swap_pte(struct vm_area_struct *vma, 5550 pte_t ptent, swp_entry_t *entry) 5551 { 5552 struct page *page = NULL; 5553 swp_entry_t ent = pte_to_swp_entry(ptent); 5554 5555 if (!(mc.flags & MOVE_ANON)) 5556 return NULL; 5557 5558 /* 5559 * Handle MEMORY_DEVICE_PRIVATE which are ZONE_DEVICE page belonging to 5560 * a device and because they are not accessible by CPU they are store 5561 * as special swap entry in the CPU page table. 5562 */ 5563 if (is_device_private_entry(ent)) { 5564 page = device_private_entry_to_page(ent); 5565 /* 5566 * MEMORY_DEVICE_PRIVATE means ZONE_DEVICE page and which have 5567 * a refcount of 1 when free (unlike normal page) 5568 */ 5569 if (!page_ref_add_unless(page, 1, 1)) 5570 return NULL; 5571 return page; 5572 } 5573 5574 if (non_swap_entry(ent)) 5575 return NULL; 5576 5577 /* 5578 * Because lookup_swap_cache() updates some statistics counter, 5579 * we call find_get_page() with swapper_space directly. 5580 */ 5581 page = find_get_page(swap_address_space(ent), swp_offset(ent)); 5582 entry->val = ent.val; 5583 5584 return page; 5585 } 5586 #else 5587 static struct page *mc_handle_swap_pte(struct vm_area_struct *vma, 5588 pte_t ptent, swp_entry_t *entry) 5589 { 5590 return NULL; 5591 } 5592 #endif 5593 5594 static struct page *mc_handle_file_pte(struct vm_area_struct *vma, 5595 unsigned long addr, pte_t ptent, swp_entry_t *entry) 5596 { 5597 if (!vma->vm_file) /* anonymous vma */ 5598 return NULL; 5599 if (!(mc.flags & MOVE_FILE)) 5600 return NULL; 5601 5602 /* page is moved even if it's not RSS of this task(page-faulted). */ 5603 /* shmem/tmpfs may report page out on swap: account for that too. */ 5604 return find_get_incore_page(vma->vm_file->f_mapping, 5605 linear_page_index(vma, addr)); 5606 } 5607 5608 /** 5609 * mem_cgroup_move_account - move account of the page 5610 * @page: the page 5611 * @compound: charge the page as compound or small page 5612 * @from: mem_cgroup which the page is moved from. 5613 * @to: mem_cgroup which the page is moved to. @from != @to. 5614 * 5615 * The caller must make sure the page is not on LRU (isolate_page() is useful.) 
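* Returns 0 on success, -EBUSY if the page could not be locked, or -EINVAL
* if the page is not charged to @from (see the function body below).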
5616 * 5617 * This function doesn't do "charge" to new cgroup and doesn't do "uncharge" 5618 * from old cgroup. 5619 */ 5620 static int mem_cgroup_move_account(struct page *page, 5621 bool compound, 5622 struct mem_cgroup *from, 5623 struct mem_cgroup *to) 5624 { 5625 struct lruvec *from_vec, *to_vec; 5626 struct pglist_data *pgdat; 5627 unsigned int nr_pages = compound ? thp_nr_pages(page) : 1; 5628 int ret; 5629 5630 VM_BUG_ON(from == to); 5631 VM_BUG_ON_PAGE(PageLRU(page), page); 5632 VM_BUG_ON(compound && !PageTransHuge(page)); 5633 5634 /* 5635 * Prevent mem_cgroup_migrate() from looking at 5636 * page's memory cgroup of its source page while we change it. 5637 */ 5638 ret = -EBUSY; 5639 if (!trylock_page(page)) 5640 goto out; 5641 5642 ret = -EINVAL; 5643 if (page_memcg(page) != from) 5644 goto out_unlock; 5645 5646 pgdat = page_pgdat(page); 5647 from_vec = mem_cgroup_lruvec(from, pgdat); 5648 to_vec = mem_cgroup_lruvec(to, pgdat); 5649 5650 lock_page_memcg(page); 5651 5652 if (PageAnon(page)) { 5653 if (page_mapped(page)) { 5654 __mod_lruvec_state(from_vec, NR_ANON_MAPPED, -nr_pages); 5655 __mod_lruvec_state(to_vec, NR_ANON_MAPPED, nr_pages); 5656 if (PageTransHuge(page)) { 5657 __mod_lruvec_state(from_vec, NR_ANON_THPS, 5658 -nr_pages); 5659 __mod_lruvec_state(to_vec, NR_ANON_THPS, 5660 nr_pages); 5661 } 5662 5663 } 5664 } else { 5665 __mod_lruvec_state(from_vec, NR_FILE_PAGES, -nr_pages); 5666 __mod_lruvec_state(to_vec, NR_FILE_PAGES, nr_pages); 5667 5668 if (PageSwapBacked(page)) { 5669 __mod_lruvec_state(from_vec, NR_SHMEM, -nr_pages); 5670 __mod_lruvec_state(to_vec, NR_SHMEM, nr_pages); 5671 } 5672 5673 if (page_mapped(page)) { 5674 __mod_lruvec_state(from_vec, NR_FILE_MAPPED, -nr_pages); 5675 __mod_lruvec_state(to_vec, NR_FILE_MAPPED, nr_pages); 5676 } 5677 5678 if (PageDirty(page)) { 5679 struct address_space *mapping = page_mapping(page); 5680 5681 if (mapping_can_writeback(mapping)) { 5682 __mod_lruvec_state(from_vec, NR_FILE_DIRTY, 5683 -nr_pages); 5684 __mod_lruvec_state(to_vec, NR_FILE_DIRTY, 5685 nr_pages); 5686 } 5687 } 5688 } 5689 5690 if (PageWriteback(page)) { 5691 __mod_lruvec_state(from_vec, NR_WRITEBACK, -nr_pages); 5692 __mod_lruvec_state(to_vec, NR_WRITEBACK, nr_pages); 5693 } 5694 5695 /* 5696 * All state has been migrated, let's switch to the new memcg. 5697 * 5698 * It is safe to change page's memcg here because the page 5699 * is referenced, charged, isolated, and locked: we can't race 5700 * with (un)charging, migration, LRU putback, or anything else 5701 * that would rely on a stable page's memory cgroup. 5702 * 5703 * Note that lock_page_memcg is a memcg lock, not a page lock, 5704 * to save space. As soon as we switch page's memory cgroup to a 5705 * new memcg that isn't locked, the above state can change 5706 * concurrently again. Make sure we're truly done with it. 
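* The smp_mb() just below provides that guarantee: it orders the stats
* transfer above before the page->memcg_data switch that follows.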
5707 */
5708 smp_mb();
5709
5710 css_get(&to->css);
5711 css_put(&from->css);
5712
5713 page->memcg_data = (unsigned long)to;
5714
5715 __unlock_page_memcg(from);
5716
5717 ret = 0;
5718
5719 local_irq_disable();
5720 mem_cgroup_charge_statistics(to, page, nr_pages);
5721 memcg_check_events(to, page);
5722 mem_cgroup_charge_statistics(from, page, -nr_pages);
5723 memcg_check_events(from, page);
5724 local_irq_enable();
5725 out_unlock:
5726 unlock_page(page);
5727 out:
5728 return ret;
5729 }
5730
5731 /**
5732 * get_mctgt_type - get target type of moving charge
5733 * @vma: the vma the pte to be checked belongs
5734 * @addr: the address corresponding to the pte to be checked
5735 * @ptent: the pte to be checked
5736 * @target: the pointer the target page or swap entry will be stored in (can be NULL)
5737 *
5738 * Returns
5739 * 0(MC_TARGET_NONE): if the pte is not a target for move charge.
5740 * 1(MC_TARGET_PAGE): if the page corresponding to this pte is a target for
5741 * move charge. If @target is not NULL, the page is stored in target->page
5742 * with an extra refcount taken (callers should handle it).
5743 * 2(MC_TARGET_SWAP): if the swap entry corresponding to this pte is a
5744 * target for charge migration. If @target is not NULL, the entry is stored
5745 * in target->ent.
5746 * 3(MC_TARGET_DEVICE): like MC_TARGET_PAGE but page is MEMORY_DEVICE_PRIVATE
5747 * (so ZONE_DEVICE page and thus not on the lru).
5748 * For now such a page is charged like a regular page would be, as for all
5749 * intents and purposes it is just special memory taking the place of a
5750 * regular page.
5751 *
5752 * See Documentation/vm/hmm.rst and include/linux/hmm.h
5753 *
5754 * Called with pte lock held.
5755 */
5756
5757 static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma,
5758 unsigned long addr, pte_t ptent, union mc_target *target)
5759 {
5760 struct page *page = NULL;
5761 enum mc_target_type ret = MC_TARGET_NONE;
5762 swp_entry_t ent = { .val = 0 };
5763
5764 if (pte_present(ptent))
5765 page = mc_handle_present_pte(vma, addr, ptent);
5766 else if (is_swap_pte(ptent))
5767 page = mc_handle_swap_pte(vma, ptent, &ent);
5768 else if (pte_none(ptent))
5769 page = mc_handle_file_pte(vma, addr, ptent, &ent);
5770
5771 if (!page && !ent.val)
5772 return ret;
5773 if (page) {
5774 /*
5775 * Do only a loose check w/o serialization.
5776 * mem_cgroup_move_account() checks whether the page is valid
5777 * or not under LRU exclusion.
5778 */
5779 if (page_memcg(page) == mc.from) {
5780 ret = MC_TARGET_PAGE;
5781 if (is_device_private_page(page))
5782 ret = MC_TARGET_DEVICE;
5783 if (target)
5784 target->page = page;
5785 }
5786 if (!ret || !target)
5787 put_page(page);
5788 }
5789 /*
5790 * There is a swap entry and a page doesn't exist or isn't charged.
5791 * But we cannot move a tail-page in a THP.
5792 */
5793 if (ent.val && !ret && (!page || !PageTransCompound(page)) &&
5794 mem_cgroup_id(mc.from) == lookup_swap_cgroup_id(ent)) {
5795 ret = MC_TARGET_SWAP;
5796 if (target)
5797 target->ent = ent;
5798 }
5799 return ret;
5800 }
5801
5802 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
5803 /*
5804 * We don't consider PMD mapped swapping or file mapped pages because THP does
5805 * not support them for now.
5806 * Caller should make sure that pmd_trans_huge(pmd) is true.
5807 */ 5808 static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma, 5809 unsigned long addr, pmd_t pmd, union mc_target *target) 5810 { 5811 struct page *page = NULL; 5812 enum mc_target_type ret = MC_TARGET_NONE; 5813 5814 if (unlikely(is_swap_pmd(pmd))) { 5815 VM_BUG_ON(thp_migration_supported() && 5816 !is_pmd_migration_entry(pmd)); 5817 return ret; 5818 } 5819 page = pmd_page(pmd); 5820 VM_BUG_ON_PAGE(!page || !PageHead(page), page); 5821 if (!(mc.flags & MOVE_ANON)) 5822 return ret; 5823 if (page_memcg(page) == mc.from) { 5824 ret = MC_TARGET_PAGE; 5825 if (target) { 5826 get_page(page); 5827 target->page = page; 5828 } 5829 } 5830 return ret; 5831 } 5832 #else 5833 static inline enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma, 5834 unsigned long addr, pmd_t pmd, union mc_target *target) 5835 { 5836 return MC_TARGET_NONE; 5837 } 5838 #endif 5839 5840 static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd, 5841 unsigned long addr, unsigned long end, 5842 struct mm_walk *walk) 5843 { 5844 struct vm_area_struct *vma = walk->vma; 5845 pte_t *pte; 5846 spinlock_t *ptl; 5847 5848 ptl = pmd_trans_huge_lock(pmd, vma); 5849 if (ptl) { 5850 /* 5851 * Note their can not be MC_TARGET_DEVICE for now as we do not 5852 * support transparent huge page with MEMORY_DEVICE_PRIVATE but 5853 * this might change. 5854 */ 5855 if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE) 5856 mc.precharge += HPAGE_PMD_NR; 5857 spin_unlock(ptl); 5858 return 0; 5859 } 5860 5861 if (pmd_trans_unstable(pmd)) 5862 return 0; 5863 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); 5864 for (; addr != end; pte++, addr += PAGE_SIZE) 5865 if (get_mctgt_type(vma, addr, *pte, NULL)) 5866 mc.precharge++; /* increment precharge temporarily */ 5867 pte_unmap_unlock(pte - 1, ptl); 5868 cond_resched(); 5869 5870 return 0; 5871 } 5872 5873 static const struct mm_walk_ops precharge_walk_ops = { 5874 .pmd_entry = mem_cgroup_count_precharge_pte_range, 5875 }; 5876 5877 static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm) 5878 { 5879 unsigned long precharge; 5880 5881 mmap_read_lock(mm); 5882 walk_page_range(mm, 0, mm->highest_vm_end, &precharge_walk_ops, NULL); 5883 mmap_read_unlock(mm); 5884 5885 precharge = mc.precharge; 5886 mc.precharge = 0; 5887 5888 return precharge; 5889 } 5890 5891 static int mem_cgroup_precharge_mc(struct mm_struct *mm) 5892 { 5893 unsigned long precharge = mem_cgroup_count_precharge(mm); 5894 5895 VM_BUG_ON(mc.moving_task); 5896 mc.moving_task = current; 5897 return mem_cgroup_do_precharge(precharge); 5898 } 5899 5900 /* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */ 5901 static void __mem_cgroup_clear_mc(void) 5902 { 5903 struct mem_cgroup *from = mc.from; 5904 struct mem_cgroup *to = mc.to; 5905 5906 /* we must uncharge all the leftover precharges from mc.to */ 5907 if (mc.precharge) { 5908 cancel_charge(mc.to, mc.precharge); 5909 mc.precharge = 0; 5910 } 5911 /* 5912 * we didn't uncharge from mc.from at mem_cgroup_move_account(), so 5913 * we must uncharge here. 
5914 */ 5915 if (mc.moved_charge) { 5916 cancel_charge(mc.from, mc.moved_charge); 5917 mc.moved_charge = 0; 5918 } 5919 /* we must fixup refcnts and charges */ 5920 if (mc.moved_swap) { 5921 /* uncharge swap account from the old cgroup */ 5922 if (!mem_cgroup_is_root(mc.from)) 5923 page_counter_uncharge(&mc.from->memsw, mc.moved_swap); 5924 5925 mem_cgroup_id_put_many(mc.from, mc.moved_swap); 5926 5927 /* 5928 * we charged both to->memory and to->memsw, so we 5929 * should uncharge to->memory. 5930 */ 5931 if (!mem_cgroup_is_root(mc.to)) 5932 page_counter_uncharge(&mc.to->memory, mc.moved_swap); 5933 5934 mc.moved_swap = 0; 5935 } 5936 memcg_oom_recover(from); 5937 memcg_oom_recover(to); 5938 wake_up_all(&mc.waitq); 5939 } 5940 5941 static void mem_cgroup_clear_mc(void) 5942 { 5943 struct mm_struct *mm = mc.mm; 5944 5945 /* 5946 * we must clear moving_task before waking up waiters at the end of 5947 * task migration. 5948 */ 5949 mc.moving_task = NULL; 5950 __mem_cgroup_clear_mc(); 5951 spin_lock(&mc.lock); 5952 mc.from = NULL; 5953 mc.to = NULL; 5954 mc.mm = NULL; 5955 spin_unlock(&mc.lock); 5956 5957 mmput(mm); 5958 } 5959 5960 static int mem_cgroup_can_attach(struct cgroup_taskset *tset) 5961 { 5962 struct cgroup_subsys_state *css; 5963 struct mem_cgroup *memcg = NULL; /* unneeded init to make gcc happy */ 5964 struct mem_cgroup *from; 5965 struct task_struct *leader, *p; 5966 struct mm_struct *mm; 5967 unsigned long move_flags; 5968 int ret = 0; 5969 5970 /* charge immigration isn't supported on the default hierarchy */ 5971 if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) 5972 return 0; 5973 5974 /* 5975 * Multi-process migrations only happen on the default hierarchy 5976 * where charge immigration is not used. Perform charge 5977 * immigration if @tset contains a leader and whine if there are 5978 * multiple. 5979 */ 5980 p = NULL; 5981 cgroup_taskset_for_each_leader(leader, css, tset) { 5982 WARN_ON_ONCE(p); 5983 p = leader; 5984 memcg = mem_cgroup_from_css(css); 5985 } 5986 if (!p) 5987 return 0; 5988 5989 /* 5990 * We are now committed to this value whatever it is. Changes in this 5991 * tunable will only affect upcoming migrations, not the current one. 5992 * So we need to save it, and keep it going.
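 * move_flags is the MOVE_ANON/MOVE_FILE bitmask that userspace wrote to
 * memory.move_charge_at_immigrate (see the MOVE_* definitions above).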
5993 */ 5994 move_flags = READ_ONCE(memcg->move_charge_at_immigrate); 5995 if (!move_flags) 5996 return 0; 5997 5998 from = mem_cgroup_from_task(p); 5999 6000 VM_BUG_ON(from == memcg); 6001 6002 mm = get_task_mm(p); 6003 if (!mm) 6004 return 0; 6005 /* We move charges only when we move a owner of the mm */ 6006 if (mm->owner == p) { 6007 VM_BUG_ON(mc.from); 6008 VM_BUG_ON(mc.to); 6009 VM_BUG_ON(mc.precharge); 6010 VM_BUG_ON(mc.moved_charge); 6011 VM_BUG_ON(mc.moved_swap); 6012 6013 spin_lock(&mc.lock); 6014 mc.mm = mm; 6015 mc.from = from; 6016 mc.to = memcg; 6017 mc.flags = move_flags; 6018 spin_unlock(&mc.lock); 6019 /* We set mc.moving_task later */ 6020 6021 ret = mem_cgroup_precharge_mc(mm); 6022 if (ret) 6023 mem_cgroup_clear_mc(); 6024 } else { 6025 mmput(mm); 6026 } 6027 return ret; 6028 } 6029 6030 static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset) 6031 { 6032 if (mc.to) 6033 mem_cgroup_clear_mc(); 6034 } 6035 6036 static int mem_cgroup_move_charge_pte_range(pmd_t *pmd, 6037 unsigned long addr, unsigned long end, 6038 struct mm_walk *walk) 6039 { 6040 int ret = 0; 6041 struct vm_area_struct *vma = walk->vma; 6042 pte_t *pte; 6043 spinlock_t *ptl; 6044 enum mc_target_type target_type; 6045 union mc_target target; 6046 struct page *page; 6047 6048 ptl = pmd_trans_huge_lock(pmd, vma); 6049 if (ptl) { 6050 if (mc.precharge < HPAGE_PMD_NR) { 6051 spin_unlock(ptl); 6052 return 0; 6053 } 6054 target_type = get_mctgt_type_thp(vma, addr, *pmd, &target); 6055 if (target_type == MC_TARGET_PAGE) { 6056 page = target.page; 6057 if (!isolate_lru_page(page)) { 6058 if (!mem_cgroup_move_account(page, true, 6059 mc.from, mc.to)) { 6060 mc.precharge -= HPAGE_PMD_NR; 6061 mc.moved_charge += HPAGE_PMD_NR; 6062 } 6063 putback_lru_page(page); 6064 } 6065 put_page(page); 6066 } else if (target_type == MC_TARGET_DEVICE) { 6067 page = target.page; 6068 if (!mem_cgroup_move_account(page, true, 6069 mc.from, mc.to)) { 6070 mc.precharge -= HPAGE_PMD_NR; 6071 mc.moved_charge += HPAGE_PMD_NR; 6072 } 6073 put_page(page); 6074 } 6075 spin_unlock(ptl); 6076 return 0; 6077 } 6078 6079 if (pmd_trans_unstable(pmd)) 6080 return 0; 6081 retry: 6082 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); 6083 for (; addr != end; addr += PAGE_SIZE) { 6084 pte_t ptent = *(pte++); 6085 bool device = false; 6086 swp_entry_t ent; 6087 6088 if (!mc.precharge) 6089 break; 6090 6091 switch (get_mctgt_type(vma, addr, ptent, &target)) { 6092 case MC_TARGET_DEVICE: 6093 device = true; 6094 fallthrough; 6095 case MC_TARGET_PAGE: 6096 page = target.page; 6097 /* 6098 * We can have a part of the split pmd here. Moving it 6099 * can be done but it would be too convoluted so simply 6100 * ignore such a partial THP and keep it in original 6101 * memcg. There should be somebody mapping the head. 6102 */ 6103 if (PageTransCompound(page)) 6104 goto put; 6105 if (!device && isolate_lru_page(page)) 6106 goto put; 6107 if (!mem_cgroup_move_account(page, false, 6108 mc.from, mc.to)) { 6109 mc.precharge--; 6110 /* we uncharge from mc.from later. */ 6111 mc.moved_charge++; 6112 } 6113 if (!device) 6114 putback_lru_page(page); 6115 put: /* get_mctgt_type() gets the page */ 6116 put_page(page); 6117 break; 6118 case MC_TARGET_SWAP: 6119 ent = target.ent; 6120 if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) { 6121 mc.precharge--; 6122 mem_cgroup_id_get_many(mc.to, 1); 6123 /* we fixup other refcnts and charges later. 
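 * __mem_cgroup_clear_mc() settles this: it uncharges mc.from's memsw
 * counter and drops the duplicate memory charge taken on mc.to for each
 * moved_swap entry.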
*/ 6124 mc.moved_swap++; 6125 } 6126 break; 6127 default: 6128 break; 6129 } 6130 } 6131 pte_unmap_unlock(pte - 1, ptl); 6132 cond_resched(); 6133 6134 if (addr != end) { 6135 /* 6136 * We have consumed all precharges we got in can_attach(). 6137 * We try charge one by one, but don't do any additional 6138 * charges to mc.to if we have failed in charge once in attach() 6139 * phase. 6140 */ 6141 ret = mem_cgroup_do_precharge(1); 6142 if (!ret) 6143 goto retry; 6144 } 6145 6146 return ret; 6147 } 6148 6149 static const struct mm_walk_ops charge_walk_ops = { 6150 .pmd_entry = mem_cgroup_move_charge_pte_range, 6151 }; 6152 6153 static void mem_cgroup_move_charge(void) 6154 { 6155 lru_add_drain_all(); 6156 /* 6157 * Signal lock_page_memcg() to take the memcg's move_lock 6158 * while we're moving its pages to another memcg. Then wait 6159 * for already started RCU-only updates to finish. 6160 */ 6161 atomic_inc(&mc.from->moving_account); 6162 synchronize_rcu(); 6163 retry: 6164 if (unlikely(!mmap_read_trylock(mc.mm))) { 6165 /* 6166 * Someone who are holding the mmap_lock might be waiting in 6167 * waitq. So we cancel all extra charges, wake up all waiters, 6168 * and retry. Because we cancel precharges, we might not be able 6169 * to move enough charges, but moving charge is a best-effort 6170 * feature anyway, so it wouldn't be a big problem. 6171 */ 6172 __mem_cgroup_clear_mc(); 6173 cond_resched(); 6174 goto retry; 6175 } 6176 /* 6177 * When we have consumed all precharges and failed in doing 6178 * additional charge, the page walk just aborts. 6179 */ 6180 walk_page_range(mc.mm, 0, mc.mm->highest_vm_end, &charge_walk_ops, 6181 NULL); 6182 6183 mmap_read_unlock(mc.mm); 6184 atomic_dec(&mc.from->moving_account); 6185 } 6186 6187 static void mem_cgroup_move_task(void) 6188 { 6189 if (mc.to) { 6190 mem_cgroup_move_charge(); 6191 mem_cgroup_clear_mc(); 6192 } 6193 } 6194 #else /* !CONFIG_MMU */ 6195 static int mem_cgroup_can_attach(struct cgroup_taskset *tset) 6196 { 6197 return 0; 6198 } 6199 static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset) 6200 { 6201 } 6202 static void mem_cgroup_move_task(void) 6203 { 6204 } 6205 #endif 6206 6207 /* 6208 * Cgroup retains root cgroups across [un]mount cycles making it necessary 6209 * to verify whether we're attached to the default hierarchy on each mount 6210 * attempt. 6211 */ 6212 static void mem_cgroup_bind(struct cgroup_subsys_state *root_css) 6213 { 6214 /* 6215 * use_hierarchy is forced on the default hierarchy. cgroup core 6216 * guarantees that @root doesn't have any children, so turning it 6217 * on for the root memcg is enough. 
6218 */ 6219 if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) 6220 root_mem_cgroup->use_hierarchy = true; 6221 else 6222 root_mem_cgroup->use_hierarchy = false; 6223 } 6224 6225 static int seq_puts_memcg_tunable(struct seq_file *m, unsigned long value) 6226 { 6227 if (value == PAGE_COUNTER_MAX) 6228 seq_puts(m, "max\n"); 6229 else 6230 seq_printf(m, "%llu\n", (u64)value * PAGE_SIZE); 6231 6232 return 0; 6233 } 6234 6235 static u64 memory_current_read(struct cgroup_subsys_state *css, 6236 struct cftype *cft) 6237 { 6238 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 6239 6240 return (u64)page_counter_read(&memcg->memory) * PAGE_SIZE; 6241 } 6242 6243 static int memory_min_show(struct seq_file *m, void *v) 6244 { 6245 return seq_puts_memcg_tunable(m, 6246 READ_ONCE(mem_cgroup_from_seq(m)->memory.min)); 6247 } 6248 6249 static ssize_t memory_min_write(struct kernfs_open_file *of, 6250 char *buf, size_t nbytes, loff_t off) 6251 { 6252 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 6253 unsigned long min; 6254 int err; 6255 6256 buf = strstrip(buf); 6257 err = page_counter_memparse(buf, "max", &min); 6258 if (err) 6259 return err; 6260 6261 page_counter_set_min(&memcg->memory, min); 6262 6263 return nbytes; 6264 } 6265 6266 static int memory_low_show(struct seq_file *m, void *v) 6267 { 6268 return seq_puts_memcg_tunable(m, 6269 READ_ONCE(mem_cgroup_from_seq(m)->memory.low)); 6270 } 6271 6272 static ssize_t memory_low_write(struct kernfs_open_file *of, 6273 char *buf, size_t nbytes, loff_t off) 6274 { 6275 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 6276 unsigned long low; 6277 int err; 6278 6279 buf = strstrip(buf); 6280 err = page_counter_memparse(buf, "max", &low); 6281 if (err) 6282 return err; 6283 6284 page_counter_set_low(&memcg->memory, low); 6285 6286 return nbytes; 6287 } 6288 6289 static int memory_high_show(struct seq_file *m, void *v) 6290 { 6291 return seq_puts_memcg_tunable(m, 6292 READ_ONCE(mem_cgroup_from_seq(m)->memory.high)); 6293 } 6294 6295 static ssize_t memory_high_write(struct kernfs_open_file *of, 6296 char *buf, size_t nbytes, loff_t off) 6297 { 6298 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 6299 unsigned int nr_retries = MAX_RECLAIM_RETRIES; 6300 bool drained = false; 6301 unsigned long high; 6302 int err; 6303 6304 buf = strstrip(buf); 6305 err = page_counter_memparse(buf, "max", &high); 6306 if (err) 6307 return err; 6308 6309 for (;;) { 6310 unsigned long nr_pages = page_counter_read(&memcg->memory); 6311 unsigned long reclaimed; 6312 6313 if (nr_pages <= high) 6314 break; 6315 6316 if (signal_pending(current)) 6317 break; 6318 6319 if (!drained) { 6320 drain_all_stock(memcg); 6321 drained = true; 6322 continue; 6323 } 6324 6325 reclaimed = try_to_free_mem_cgroup_pages(memcg, nr_pages - high, 6326 GFP_KERNEL, true); 6327 6328 if (!reclaimed && !nr_retries--) 6329 break; 6330 } 6331 6332 page_counter_set_high(&memcg->memory, high); 6333 6334 memcg_wb_domain_size_changed(memcg); 6335 6336 return nbytes; 6337 } 6338 6339 static int memory_max_show(struct seq_file *m, void *v) 6340 { 6341 return seq_puts_memcg_tunable(m, 6342 READ_ONCE(mem_cgroup_from_seq(m)->memory.max)); 6343 } 6344 6345 static ssize_t memory_max_write(struct kernfs_open_file *of, 6346 char *buf, size_t nbytes, loff_t off) 6347 { 6348 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 6349 unsigned int nr_reclaims = MAX_RECLAIM_RETRIES; 6350 bool drained = false; 6351 unsigned long max; 6352 int err; 6353 6354 buf = strstrip(buf); 6355 err = 
page_counter_memparse(buf, "max", &max); 6356 if (err) 6357 return err; 6358 6359 xchg(&memcg->memory.max, max); 6360 6361 for (;;) { 6362 unsigned long nr_pages = page_counter_read(&memcg->memory); 6363 6364 if (nr_pages <= max) 6365 break; 6366 6367 if (signal_pending(current)) 6368 break; 6369 6370 if (!drained) { 6371 drain_all_stock(memcg); 6372 drained = true; 6373 continue; 6374 } 6375 6376 if (nr_reclaims) { 6377 if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max, 6378 GFP_KERNEL, true)) 6379 nr_reclaims--; 6380 continue; 6381 } 6382 6383 memcg_memory_event(memcg, MEMCG_OOM); 6384 if (!mem_cgroup_out_of_memory(memcg, GFP_KERNEL, 0)) 6385 break; 6386 } 6387 6388 memcg_wb_domain_size_changed(memcg); 6389 return nbytes; 6390 } 6391 6392 static void __memory_events_show(struct seq_file *m, atomic_long_t *events) 6393 { 6394 seq_printf(m, "low %lu\n", atomic_long_read(&events[MEMCG_LOW])); 6395 seq_printf(m, "high %lu\n", atomic_long_read(&events[MEMCG_HIGH])); 6396 seq_printf(m, "max %lu\n", atomic_long_read(&events[MEMCG_MAX])); 6397 seq_printf(m, "oom %lu\n", atomic_long_read(&events[MEMCG_OOM])); 6398 seq_printf(m, "oom_kill %lu\n", 6399 atomic_long_read(&events[MEMCG_OOM_KILL])); 6400 } 6401 6402 static int memory_events_show(struct seq_file *m, void *v) 6403 { 6404 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 6405 6406 __memory_events_show(m, memcg->memory_events); 6407 return 0; 6408 } 6409 6410 static int memory_events_local_show(struct seq_file *m, void *v) 6411 { 6412 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 6413 6414 __memory_events_show(m, memcg->memory_events_local); 6415 return 0; 6416 } 6417 6418 static int memory_stat_show(struct seq_file *m, void *v) 6419 { 6420 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 6421 char *buf; 6422 6423 buf = memory_stat_format(memcg); 6424 if (!buf) 6425 return -ENOMEM; 6426 seq_puts(m, buf); 6427 kfree(buf); 6428 return 0; 6429 } 6430 6431 #ifdef CONFIG_NUMA 6432 static int memory_numa_stat_show(struct seq_file *m, void *v) 6433 { 6434 int i; 6435 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 6436 6437 for (i = 0; i < ARRAY_SIZE(memory_stats); i++) { 6438 int nid; 6439 6440 if (memory_stats[i].idx >= NR_VM_NODE_STAT_ITEMS) 6441 continue; 6442 6443 seq_printf(m, "%s", memory_stats[i].name); 6444 for_each_node_state(nid, N_MEMORY) { 6445 u64 size; 6446 struct lruvec *lruvec; 6447 6448 lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid)); 6449 size = lruvec_page_state(lruvec, memory_stats[i].idx); 6450 size *= memory_stats[i].ratio; 6451 seq_printf(m, " N%d=%llu", nid, size); 6452 } 6453 seq_putc(m, '\n'); 6454 } 6455 6456 return 0; 6457 } 6458 #endif 6459 6460 static int memory_oom_group_show(struct seq_file *m, void *v) 6461 { 6462 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 6463 6464 seq_printf(m, "%d\n", memcg->oom_group); 6465 6466 return 0; 6467 } 6468 6469 static ssize_t memory_oom_group_write(struct kernfs_open_file *of, 6470 char *buf, size_t nbytes, loff_t off) 6471 { 6472 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 6473 int ret, oom_group; 6474 6475 buf = strstrip(buf); 6476 if (!buf) 6477 return -EINVAL; 6478 6479 ret = kstrtoint(buf, 0, &oom_group); 6480 if (ret) 6481 return ret; 6482 6483 if (oom_group != 0 && oom_group != 1) 6484 return -EINVAL; 6485 6486 memcg->oom_group = oom_group; 6487 6488 return nbytes; 6489 } 6490 6491 static struct cftype memory_files[] = { 6492 { 6493 .name = "current", 6494 .flags = CFTYPE_NOT_ON_ROOT, 6495 .read_u64 = memory_current_read, 6496 }, 
6497 { 6498 .name = "min", 6499 .flags = CFTYPE_NOT_ON_ROOT, 6500 .seq_show = memory_min_show, 6501 .write = memory_min_write, 6502 }, 6503 { 6504 .name = "low", 6505 .flags = CFTYPE_NOT_ON_ROOT, 6506 .seq_show = memory_low_show, 6507 .write = memory_low_write, 6508 }, 6509 { 6510 .name = "high", 6511 .flags = CFTYPE_NOT_ON_ROOT, 6512 .seq_show = memory_high_show, 6513 .write = memory_high_write, 6514 }, 6515 { 6516 .name = "max", 6517 .flags = CFTYPE_NOT_ON_ROOT, 6518 .seq_show = memory_max_show, 6519 .write = memory_max_write, 6520 }, 6521 { 6522 .name = "events", 6523 .flags = CFTYPE_NOT_ON_ROOT, 6524 .file_offset = offsetof(struct mem_cgroup, events_file), 6525 .seq_show = memory_events_show, 6526 }, 6527 { 6528 .name = "events.local", 6529 .flags = CFTYPE_NOT_ON_ROOT, 6530 .file_offset = offsetof(struct mem_cgroup, events_local_file), 6531 .seq_show = memory_events_local_show, 6532 }, 6533 { 6534 .name = "stat", 6535 .seq_show = memory_stat_show, 6536 }, 6537 #ifdef CONFIG_NUMA 6538 { 6539 .name = "numa_stat", 6540 .seq_show = memory_numa_stat_show, 6541 }, 6542 #endif 6543 { 6544 .name = "oom.group", 6545 .flags = CFTYPE_NOT_ON_ROOT | CFTYPE_NS_DELEGATABLE, 6546 .seq_show = memory_oom_group_show, 6547 .write = memory_oom_group_write, 6548 }, 6549 { } /* terminate */ 6550 }; 6551 6552 struct cgroup_subsys memory_cgrp_subsys = { 6553 .css_alloc = mem_cgroup_css_alloc, 6554 .css_online = mem_cgroup_css_online, 6555 .css_offline = mem_cgroup_css_offline, 6556 .css_released = mem_cgroup_css_released, 6557 .css_free = mem_cgroup_css_free, 6558 .css_reset = mem_cgroup_css_reset, 6559 .can_attach = mem_cgroup_can_attach, 6560 .cancel_attach = mem_cgroup_cancel_attach, 6561 .post_attach = mem_cgroup_move_task, 6562 .bind = mem_cgroup_bind, 6563 .dfl_cftypes = memory_files, 6564 .legacy_cftypes = mem_cgroup_legacy_files, 6565 .early_init = 0, 6566 }; 6567 6568 /* 6569 * This function calculates an individual cgroup's effective 6570 * protection which is derived from its own memory.min/low, its 6571 * parent's and siblings' settings, as well as the actual memory 6572 * distribution in the tree. 6573 * 6574 * The following rules apply to the effective protection values: 6575 * 6576 * 1. At the first level of reclaim, effective protection is equal to 6577 * the declared protection in memory.min and memory.low. 6578 * 6579 * 2. To enable safe delegation of the protection configuration, at 6580 * subsequent levels the effective protection is capped to the 6581 * parent's effective protection. 6582 * 6583 * 3. To make complex and dynamic subtrees easier to configure, the 6584 * user is allowed to overcommit the declared protection at a given 6585 * level. If that is the case, the parent's effective protection is 6586 * distributed to the children in proportion to how much protection 6587 * they have declared and how much of it they are utilizing. 6588 * 6589 * This makes distribution proportional, but also work-conserving: 6590 * if one cgroup claims much more protection than it uses memory, 6591 * the unused remainder is available to its siblings. 6592 * 6593 * 4. Conversely, when the declared protection is undercommitted at a 6594 * given level, the distribution of the larger parental protection 6595 * budget is NOT proportional. A cgroup's protection from a sibling 6596 * is capped to its own memory.min/low setting. 6597 * 6598 * 5. 
However, to allow protecting recursive subtrees from each other 6599 * without having to declare each individual cgroup's fixed share 6600 * of the ancestor's claim to protection, any unutilized - 6601 * "floating" - protection from up the tree is distributed in 6602 * proportion to each cgroup's *usage*. This makes the protection 6603 * neutral wrt sibling cgroups and lets them compete freely over 6604 * the shared parental protection budget, but it protects the 6605 * subtree as a whole from neighboring subtrees. 6606 * 6607 * Note that 4. and 5. are not in conflict: 4. is about protecting 6608 * against immediate siblings whereas 5. is about protecting against 6609 * neighboring subtrees. 6610 */ 6611 static unsigned long effective_protection(unsigned long usage, 6612 unsigned long parent_usage, 6613 unsigned long setting, 6614 unsigned long parent_effective, 6615 unsigned long siblings_protected) 6616 { 6617 unsigned long protected; 6618 unsigned long ep; 6619 6620 protected = min(usage, setting); 6621 /* 6622 * If all cgroups at this level combined claim and use more 6623 * protection than what the parent affords them, distribute 6624 * shares in proportion to utilization. 6625 * 6626 * We are using actual utilization rather than the statically 6627 * claimed protection in order to be work-conserving: claimed 6628 * but unused protection is available to siblings that would 6629 * otherwise get a smaller chunk than what they claimed. 6630 */ 6631 if (siblings_protected > parent_effective) 6632 return protected * parent_effective / siblings_protected; 6633 6634 /* 6635 * Ok, utilized protection of all children is within what the 6636 * parent affords them, so we know whatever this child claims 6637 * and utilizes is effectively protected. 6638 * 6639 * If there is unprotected usage beyond this value, reclaim 6640 * will apply pressure in proportion to that amount. 6641 * 6642 * If there is unutilized protection, the cgroup will be fully 6643 * shielded from reclaim, but we do return a smaller value for 6644 * protection than what the group could enjoy in theory. This 6645 * is okay. With the overcommit distribution above, effective 6646 * protection is always dependent on how memory is actually 6647 * consumed among the siblings anyway. 6648 */ 6649 ep = protected; 6650 6651 /* 6652 * If the children aren't claiming (all of) the protection 6653 * afforded to them by the parent, distribute the remainder in 6654 * proportion to the (unprotected) memory of each cgroup. That 6655 * way, cgroups that aren't explicitly prioritized wrt each 6656 * other compete freely over the allowance, but they are 6657 * collectively protected from neighboring trees. 6658 * 6659 * We're using unprotected memory for the weight so that if 6660 * some cgroups DO claim explicit protection, we don't protect 6661 * the same bytes twice. 6662 * 6663 * Check both usage and parent_usage against the respective 6664 * protected values. One should imply the other, but they 6665 * aren't read atomically - make sure the division is sane.
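 *
 * Hypothetical worked example of the distribution below: with
 * parent_effective = 100M, siblings_protected = 60M, parent_usage = 80M,
 * and a child that declares 20M of protection but uses 30M (protected =
 * 20M), the floating share is (100M - 60M) * (30M - 20M) / (80M - 60M) =
 * 20M, raising ep from 20M to 40M.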
6666 */ 6667 if (!(cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_RECURSIVE_PROT)) 6668 return ep; 6669 if (parent_effective > siblings_protected && 6670 parent_usage > siblings_protected && 6671 usage > protected) { 6672 unsigned long unclaimed; 6673 6674 unclaimed = parent_effective - siblings_protected; 6675 unclaimed *= usage - protected; 6676 unclaimed /= parent_usage - siblings_protected; 6677 6678 ep += unclaimed; 6679 } 6680 6681 return ep; 6682 } 6683 6684 /** 6685 * mem_cgroup_calculate_protection - calculate effective memory protection of a memcg 6686 * @root: the top ancestor of the sub-tree being checked 6687 * @memcg: the memory cgroup to check 6688 * 6689 * WARNING: This function is not stateless! It can only be used as part 6690 * of a top-down tree iteration, not for isolated queries. 6691 */ 6692 void mem_cgroup_calculate_protection(struct mem_cgroup *root, 6693 struct mem_cgroup *memcg) 6694 { 6695 unsigned long usage, parent_usage; 6696 struct mem_cgroup *parent; 6697 6698 if (mem_cgroup_disabled()) 6699 return; 6700 6701 if (!root) 6702 root = root_mem_cgroup; 6703 6704 /* 6705 * Effective values of the reclaim targets are ignored so they 6706 * can be stale. Have a look at mem_cgroup_protection for more 6707 * details. 6708 * TODO: calculation should be more robust so that we do not need 6709 * that special casing. 6710 */ 6711 if (memcg == root) 6712 return; 6713 6714 usage = page_counter_read(&memcg->memory); 6715 if (!usage) 6716 return; 6717 6718 parent = parent_mem_cgroup(memcg); 6719 /* No parent means a non-hierarchical mode on v1 memcg */ 6720 if (!parent) 6721 return; 6722 6723 if (parent == root) { 6724 memcg->memory.emin = READ_ONCE(memcg->memory.min); 6725 memcg->memory.elow = READ_ONCE(memcg->memory.low); 6726 return; 6727 } 6728 6729 parent_usage = page_counter_read(&parent->memory); 6730 6731 WRITE_ONCE(memcg->memory.emin, effective_protection(usage, parent_usage, 6732 READ_ONCE(memcg->memory.min), 6733 READ_ONCE(parent->memory.emin), 6734 atomic_long_read(&parent->memory.children_min_usage))); 6735 6736 WRITE_ONCE(memcg->memory.elow, effective_protection(usage, parent_usage, 6737 READ_ONCE(memcg->memory.low), 6738 READ_ONCE(parent->memory.elow), 6739 atomic_long_read(&parent->memory.children_low_usage))); 6740 } 6741 6742 /** 6743 * mem_cgroup_charge - charge a newly allocated page to a cgroup 6744 * @page: page to charge 6745 * @mm: mm context of the victim 6746 * @gfp_mask: reclaim mode 6747 * 6748 * Try to charge @page to the memcg that @mm belongs to, reclaiming 6749 * pages according to @gfp_mask if necessary. 6750 * 6751 * Returns 0 on success. Otherwise, an error code is returned. 6752 */ 6753 int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask) 6754 { 6755 unsigned int nr_pages = thp_nr_pages(page); 6756 struct mem_cgroup *memcg = NULL; 6757 int ret = 0; 6758 6759 if (mem_cgroup_disabled()) 6760 goto out; 6761 6762 if (PageSwapCache(page)) { 6763 swp_entry_t ent = { .val = page_private(page), }; 6764 unsigned short id; 6765 6766 /* 6767 * Every swap fault against a single page tries to charge the 6768 * page, bail as early as possible. shmem_unuse() encounters 6769 * already charged pages, too. page and memcg binding is 6770 * protected by the page lock, which serializes swap cache 6771 * removal, which in turn serializes uncharging.
6772 */ 6773 VM_BUG_ON_PAGE(!PageLocked(page), page); 6774 if (page_memcg(compound_head(page))) 6775 goto out; 6776 6777 id = lookup_swap_cgroup_id(ent); 6778 rcu_read_lock(); 6779 memcg = mem_cgroup_from_id(id); 6780 if (memcg && !css_tryget_online(&memcg->css)) 6781 memcg = NULL; 6782 rcu_read_unlock(); 6783 } 6784 6785 if (!memcg) 6786 memcg = get_mem_cgroup_from_mm(mm); 6787 6788 ret = try_charge(memcg, gfp_mask, nr_pages); 6789 if (ret) 6790 goto out_put; 6791 6792 css_get(&memcg->css); 6793 commit_charge(page, memcg); 6794 6795 local_irq_disable(); 6796 mem_cgroup_charge_statistics(memcg, page, nr_pages); 6797 memcg_check_events(memcg, page); 6798 local_irq_enable(); 6799 6800 if (PageSwapCache(page)) { 6801 swp_entry_t entry = { .val = page_private(page) }; 6802 /* 6803 * The swap entry might not get freed for a long time, 6804 * let's not wait for it. The page already received a 6805 * memory+swap charge, drop the swap entry duplicate. 6806 */ 6807 mem_cgroup_uncharge_swap(entry, nr_pages); 6808 } 6809 6810 out_put: 6811 css_put(&memcg->css); 6812 out: 6813 return ret; 6814 } 6815 6816 struct uncharge_gather { 6817 struct mem_cgroup *memcg; 6818 unsigned long nr_pages; 6819 unsigned long pgpgout; 6820 unsigned long nr_kmem; 6821 struct page *dummy_page; 6822 }; 6823 6824 static inline void uncharge_gather_clear(struct uncharge_gather *ug) 6825 { 6826 memset(ug, 0, sizeof(*ug)); 6827 } 6828 6829 static void uncharge_batch(const struct uncharge_gather *ug) 6830 { 6831 unsigned long flags; 6832 6833 if (!mem_cgroup_is_root(ug->memcg)) { 6834 page_counter_uncharge(&ug->memcg->memory, ug->nr_pages); 6835 if (do_memsw_account()) 6836 page_counter_uncharge(&ug->memcg->memsw, ug->nr_pages); 6837 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && ug->nr_kmem) 6838 page_counter_uncharge(&ug->memcg->kmem, ug->nr_kmem); 6839 memcg_oom_recover(ug->memcg); 6840 } 6841 6842 local_irq_save(flags); 6843 __count_memcg_events(ug->memcg, PGPGOUT, ug->pgpgout); 6844 __this_cpu_add(ug->memcg->vmstats_percpu->nr_page_events, ug->nr_pages); 6845 memcg_check_events(ug->memcg, ug->dummy_page); 6846 local_irq_restore(flags); 6847 6848 /* drop reference from uncharge_page */ 6849 css_put(&ug->memcg->css); 6850 } 6851 6852 static void uncharge_page(struct page *page, struct uncharge_gather *ug) 6853 { 6854 unsigned long nr_pages; 6855 6856 VM_BUG_ON_PAGE(PageLRU(page), page); 6857 6858 if (!page_memcg(page)) 6859 return; 6860 6861 /* 6862 * Nobody should be changing or seriously looking at 6863 * page_memcg(page) at this point, we have fully 6864 * exclusive access to the page. 6865 */ 6866 6867 if (ug->memcg != page_memcg(page)) { 6868 if (ug->memcg) { 6869 uncharge_batch(ug); 6870 uncharge_gather_clear(ug); 6871 } 6872 ug->memcg = page_memcg(page); 6873 6874 /* pairs with css_put in uncharge_batch */ 6875 css_get(&ug->memcg->css); 6876 } 6877 6878 nr_pages = compound_nr(page); 6879 ug->nr_pages += nr_pages; 6880 6881 if (PageMemcgKmem(page)) 6882 ug->nr_kmem += nr_pages; 6883 else 6884 ug->pgpgout++; 6885 6886 ug->dummy_page = page; 6887 page->memcg_data = 0; 6888 css_put(&ug->memcg->css); 6889 } 6890 6891 static void uncharge_list(struct list_head *page_list) 6892 { 6893 struct uncharge_gather ug; 6894 struct list_head *next; 6895 6896 uncharge_gather_clear(&ug); 6897 6898 /* 6899 * Note that the list can be a single page->lru; hence the 6900 * do-while loop instead of a simple list_for_each_entry(). 
6901 */ 6902 next = page_list->next; 6903 do { 6904 struct page *page; 6905 6906 page = list_entry(next, struct page, lru); 6907 next = page->lru.next; 6908 6909 uncharge_page(page, &ug); 6910 } while (next != page_list); 6911 6912 if (ug.memcg) 6913 uncharge_batch(&ug); 6914 } 6915 6916 /** 6917 * mem_cgroup_uncharge - uncharge a page 6918 * @page: page to uncharge 6919 * 6920 * Uncharge a page previously charged with mem_cgroup_charge(). 6921 */ 6922 void mem_cgroup_uncharge(struct page *page) 6923 { 6924 struct uncharge_gather ug; 6925 6926 if (mem_cgroup_disabled()) 6927 return; 6928 6929 /* Don't touch page->lru of any random page, pre-check: */ 6930 if (!page_memcg(page)) 6931 return; 6932 6933 uncharge_gather_clear(&ug); 6934 uncharge_page(page, &ug); 6935 uncharge_batch(&ug); 6936 } 6937 6938 /** 6939 * mem_cgroup_uncharge_list - uncharge a list of page 6940 * @page_list: list of pages to uncharge 6941 * 6942 * Uncharge a list of pages previously charged with 6943 * mem_cgroup_charge(). 6944 */ 6945 void mem_cgroup_uncharge_list(struct list_head *page_list) 6946 { 6947 if (mem_cgroup_disabled()) 6948 return; 6949 6950 if (!list_empty(page_list)) 6951 uncharge_list(page_list); 6952 } 6953 6954 /** 6955 * mem_cgroup_migrate - charge a page's replacement 6956 * @oldpage: currently circulating page 6957 * @newpage: replacement page 6958 * 6959 * Charge @newpage as a replacement page for @oldpage. @oldpage will 6960 * be uncharged upon free. 6961 * 6962 * Both pages must be locked, @newpage->mapping must be set up. 6963 */ 6964 void mem_cgroup_migrate(struct page *oldpage, struct page *newpage) 6965 { 6966 struct mem_cgroup *memcg; 6967 unsigned int nr_pages; 6968 unsigned long flags; 6969 6970 VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage); 6971 VM_BUG_ON_PAGE(!PageLocked(newpage), newpage); 6972 VM_BUG_ON_PAGE(PageAnon(oldpage) != PageAnon(newpage), newpage); 6973 VM_BUG_ON_PAGE(PageTransHuge(oldpage) != PageTransHuge(newpage), 6974 newpage); 6975 6976 if (mem_cgroup_disabled()) 6977 return; 6978 6979 /* Page cache replacement: new page already charged? */ 6980 if (page_memcg(newpage)) 6981 return; 6982 6983 /* Swapcache readahead pages can get replaced before being charged */ 6984 memcg = page_memcg(oldpage); 6985 if (!memcg) 6986 return; 6987 6988 /* Force-charge the new page. The old one will be freed soon */ 6989 nr_pages = thp_nr_pages(newpage); 6990 6991 page_counter_charge(&memcg->memory, nr_pages); 6992 if (do_memsw_account()) 6993 page_counter_charge(&memcg->memsw, nr_pages); 6994 6995 css_get(&memcg->css); 6996 commit_charge(newpage, memcg); 6997 6998 local_irq_save(flags); 6999 mem_cgroup_charge_statistics(memcg, newpage, nr_pages); 7000 memcg_check_events(memcg, newpage); 7001 local_irq_restore(flags); 7002 } 7003 7004 DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key); 7005 EXPORT_SYMBOL(memcg_sockets_enabled_key); 7006 7007 void mem_cgroup_sk_alloc(struct sock *sk) 7008 { 7009 struct mem_cgroup *memcg; 7010 7011 if (!mem_cgroup_sockets_enabled) 7012 return; 7013 7014 /* Do not associate the sock with unrelated interrupted task's memcg. 
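 * In interrupt context "current" is whatever task happened to be running
 * when the interrupt fired, so its memcg says nothing about this socket;
 * bail out below in that case.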
*/ 7015 if (in_interrupt()) 7016 return; 7017 7018 rcu_read_lock(); 7019 memcg = mem_cgroup_from_task(current); 7020 if (memcg == root_mem_cgroup) 7021 goto out; 7022 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg->tcpmem_active) 7023 goto out; 7024 if (css_tryget(&memcg->css)) 7025 sk->sk_memcg = memcg; 7026 out: 7027 rcu_read_unlock(); 7028 } 7029 7030 void mem_cgroup_sk_free(struct sock *sk) 7031 { 7032 if (sk->sk_memcg) 7033 css_put(&sk->sk_memcg->css); 7034 } 7035 7036 /** 7037 * mem_cgroup_charge_skmem - charge socket memory 7038 * @memcg: memcg to charge 7039 * @nr_pages: number of pages to charge 7040 * 7041 * Charges @nr_pages to @memcg. Returns %true if the charge fit within 7042 * @memcg's configured limit, %false if the charge had to be forced. 7043 */ 7044 bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages) 7045 { 7046 gfp_t gfp_mask = GFP_KERNEL; 7047 7048 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) { 7049 struct page_counter *fail; 7050 7051 if (page_counter_try_charge(&memcg->tcpmem, nr_pages, &fail)) { 7052 memcg->tcpmem_pressure = 0; 7053 return true; 7054 } 7055 page_counter_charge(&memcg->tcpmem, nr_pages); 7056 memcg->tcpmem_pressure = 1; 7057 return false; 7058 } 7059 7060 /* Don't block in the packet receive path */ 7061 if (in_softirq()) 7062 gfp_mask = GFP_NOWAIT; 7063 7064 mod_memcg_state(memcg, MEMCG_SOCK, nr_pages); 7065 7066 if (try_charge(memcg, gfp_mask, nr_pages) == 0) 7067 return true; 7068 7069 try_charge(memcg, gfp_mask|__GFP_NOFAIL, nr_pages); 7070 return false; 7071 } 7072 7073 /** 7074 * mem_cgroup_uncharge_skmem - uncharge socket memory 7075 * @memcg: memcg to uncharge 7076 * @nr_pages: number of pages to uncharge 7077 */ 7078 void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages) 7079 { 7080 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) { 7081 page_counter_uncharge(&memcg->tcpmem, nr_pages); 7082 return; 7083 } 7084 7085 mod_memcg_state(memcg, MEMCG_SOCK, -nr_pages); 7086 7087 refill_stock(memcg, nr_pages); 7088 } 7089 7090 static int __init cgroup_memory(char *s) 7091 { 7092 char *token; 7093 7094 while ((token = strsep(&s, ",")) != NULL) { 7095 if (!*token) 7096 continue; 7097 if (!strcmp(token, "nosocket")) 7098 cgroup_memory_nosocket = true; 7099 if (!strcmp(token, "nokmem")) 7100 cgroup_memory_nokmem = true; 7101 } 7102 return 0; 7103 } 7104 __setup("cgroup.memory=", cgroup_memory); 7105 7106 /* 7107 * subsys_initcall() for memory controller. 7108 * 7109 * Some parts like memcg_hotplug_cpu_dead() have to be initialized from this 7110 * context because of lock dependencies (cgroup_lock -> cpu hotplug) but 7111 * basically everything that doesn't depend on a specific mem_cgroup structure 7112 * should be initialized from here. 7113 */ 7114 static int __init mem_cgroup_init(void) 7115 { 7116 int cpu, node; 7117 7118 cpuhp_setup_state_nocalls(CPUHP_MM_MEMCQ_DEAD, "mm/memctrl:dead", NULL, 7119 memcg_hotplug_cpu_dead); 7120 7121 for_each_possible_cpu(cpu) 7122 INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work, 7123 drain_local_stock); 7124 7125 for_each_node(node) { 7126 struct mem_cgroup_tree_per_node *rtpn; 7127 7128 rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL, 7129 node_online(node) ? 
node : NUMA_NO_NODE); 7130 7131 rtpn->rb_root = RB_ROOT; 7132 rtpn->rb_rightmost = NULL; 7133 spin_lock_init(&rtpn->lock); 7134 soft_limit_tree.rb_tree_per_node[node] = rtpn; 7135 } 7136 7137 return 0; 7138 } 7139 subsys_initcall(mem_cgroup_init); 7140 7141 #ifdef CONFIG_MEMCG_SWAP 7142 static struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg) 7143 { 7144 while (!refcount_inc_not_zero(&memcg->id.ref)) { 7145 /* 7146 * The root cgroup cannot be destroyed, so it's refcount must 7147 * always be >= 1. 7148 */ 7149 if (WARN_ON_ONCE(memcg == root_mem_cgroup)) { 7150 VM_BUG_ON(1); 7151 break; 7152 } 7153 memcg = parent_mem_cgroup(memcg); 7154 if (!memcg) 7155 memcg = root_mem_cgroup; 7156 } 7157 return memcg; 7158 } 7159 7160 /** 7161 * mem_cgroup_swapout - transfer a memsw charge to swap 7162 * @page: page whose memsw charge to transfer 7163 * @entry: swap entry to move the charge to 7164 * 7165 * Transfer the memsw charge of @page to @entry. 7166 */ 7167 void mem_cgroup_swapout(struct page *page, swp_entry_t entry) 7168 { 7169 struct mem_cgroup *memcg, *swap_memcg; 7170 unsigned int nr_entries; 7171 unsigned short oldid; 7172 7173 VM_BUG_ON_PAGE(PageLRU(page), page); 7174 VM_BUG_ON_PAGE(page_count(page), page); 7175 7176 if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) 7177 return; 7178 7179 memcg = page_memcg(page); 7180 7181 /* Readahead page, never charged */ 7182 if (!memcg) 7183 return; 7184 7185 /* 7186 * In case the memcg owning these pages has been offlined and doesn't 7187 * have an ID allocated to it anymore, charge the closest online 7188 * ancestor for the swap instead and transfer the memory+swap charge. 7189 */ 7190 swap_memcg = mem_cgroup_id_get_online(memcg); 7191 nr_entries = thp_nr_pages(page); 7192 /* Get references for the tail pages, too */ 7193 if (nr_entries > 1) 7194 mem_cgroup_id_get_many(swap_memcg, nr_entries - 1); 7195 oldid = swap_cgroup_record(entry, mem_cgroup_id(swap_memcg), 7196 nr_entries); 7197 VM_BUG_ON_PAGE(oldid, page); 7198 mod_memcg_state(swap_memcg, MEMCG_SWAP, nr_entries); 7199 7200 page->memcg_data = 0; 7201 7202 if (!mem_cgroup_is_root(memcg)) 7203 page_counter_uncharge(&memcg->memory, nr_entries); 7204 7205 if (!cgroup_memory_noswap && memcg != swap_memcg) { 7206 if (!mem_cgroup_is_root(swap_memcg)) 7207 page_counter_charge(&swap_memcg->memsw, nr_entries); 7208 page_counter_uncharge(&memcg->memsw, nr_entries); 7209 } 7210 7211 /* 7212 * Interrupts should be disabled here because the caller holds the 7213 * i_pages lock which is taken with interrupts-off. It is 7214 * important here to have the interrupts disabled because it is the 7215 * only synchronisation we have for updating the per-CPU variables. 7216 */ 7217 VM_BUG_ON(!irqs_disabled()); 7218 mem_cgroup_charge_statistics(memcg, page, -nr_entries); 7219 memcg_check_events(memcg, page); 7220 7221 css_put(&memcg->css); 7222 } 7223 7224 /** 7225 * mem_cgroup_try_charge_swap - try charging swap space for a page 7226 * @page: page being added to swap 7227 * @entry: swap entry to charge 7228 * 7229 * Try to charge @page's memcg for the swap space at @entry. 7230 * 7231 * Returns 0 on success, -ENOMEM on failure. 
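 *
 * Note that on the legacy (cgroup1) hierarchy this returns 0 without
 * charging anything; there the swap usage is presumably already covered by
 * the combined memory+swap (memsw) counter, so no separate swap charge is
 * taken.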
7232 */ 7233 int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry) 7234 { 7235 unsigned int nr_pages = thp_nr_pages(page); 7236 struct page_counter *counter; 7237 struct mem_cgroup *memcg; 7238 unsigned short oldid; 7239 7240 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) 7241 return 0; 7242 7243 memcg = page_memcg(page); 7244 7245 /* Readahead page, never charged */ 7246 if (!memcg) 7247 return 0; 7248 7249 if (!entry.val) { 7250 memcg_memory_event(memcg, MEMCG_SWAP_FAIL); 7251 return 0; 7252 } 7253 7254 memcg = mem_cgroup_id_get_online(memcg); 7255 7256 if (!cgroup_memory_noswap && !mem_cgroup_is_root(memcg) && 7257 !page_counter_try_charge(&memcg->swap, nr_pages, &counter)) { 7258 memcg_memory_event(memcg, MEMCG_SWAP_MAX); 7259 memcg_memory_event(memcg, MEMCG_SWAP_FAIL); 7260 mem_cgroup_id_put(memcg); 7261 return -ENOMEM; 7262 } 7263 7264 /* Get references for the tail pages, too */ 7265 if (nr_pages > 1) 7266 mem_cgroup_id_get_many(memcg, nr_pages - 1); 7267 oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg), nr_pages); 7268 VM_BUG_ON_PAGE(oldid, page); 7269 mod_memcg_state(memcg, MEMCG_SWAP, nr_pages); 7270 7271 return 0; 7272 } 7273 7274 /** 7275 * mem_cgroup_uncharge_swap - uncharge swap space 7276 * @entry: swap entry to uncharge 7277 * @nr_pages: the amount of swap space to uncharge 7278 */ 7279 void mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages) 7280 { 7281 struct mem_cgroup *memcg; 7282 unsigned short id; 7283 7284 id = swap_cgroup_record(entry, 0, nr_pages); 7285 rcu_read_lock(); 7286 memcg = mem_cgroup_from_id(id); 7287 if (memcg) { 7288 if (!cgroup_memory_noswap && !mem_cgroup_is_root(memcg)) { 7289 if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) 7290 page_counter_uncharge(&memcg->swap, nr_pages); 7291 else 7292 page_counter_uncharge(&memcg->memsw, nr_pages); 7293 } 7294 mod_memcg_state(memcg, MEMCG_SWAP, -nr_pages); 7295 mem_cgroup_id_put_many(memcg, nr_pages); 7296 } 7297 rcu_read_unlock(); 7298 } 7299 7300 long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg) 7301 { 7302 long nr_swap_pages = get_nr_swap_pages(); 7303 7304 if (cgroup_memory_noswap || !cgroup_subsys_on_dfl(memory_cgrp_subsys)) 7305 return nr_swap_pages; 7306 for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg)) 7307 nr_swap_pages = min_t(long, nr_swap_pages, 7308 READ_ONCE(memcg->swap.max) - 7309 page_counter_read(&memcg->swap)); 7310 return nr_swap_pages; 7311 } 7312 7313 bool mem_cgroup_swap_full(struct page *page) 7314 { 7315 struct mem_cgroup *memcg; 7316 7317 VM_BUG_ON_PAGE(!PageLocked(page), page); 7318 7319 if (vm_swap_full()) 7320 return true; 7321 if (cgroup_memory_noswap || !cgroup_subsys_on_dfl(memory_cgrp_subsys)) 7322 return false; 7323 7324 memcg = page_memcg(page); 7325 if (!memcg) 7326 return false; 7327 7328 for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg)) { 7329 unsigned long usage = page_counter_read(&memcg->swap); 7330 7331 if (usage * 2 >= READ_ONCE(memcg->swap.high) || 7332 usage * 2 >= READ_ONCE(memcg->swap.max)) 7333 return true; 7334 } 7335 7336 return false; 7337 } 7338 7339 static int __init setup_swap_account(char *s) 7340 { 7341 if (!strcmp(s, "1")) 7342 cgroup_memory_noswap = 0; 7343 else if (!strcmp(s, "0")) 7344 cgroup_memory_noswap = 1; 7345 return 1; 7346 } 7347 __setup("swapaccount=", setup_swap_account); 7348 7349 static u64 swap_current_read(struct cgroup_subsys_state *css, 7350 struct cftype *cft) 7351 { 7352 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 7353 7354 return 
(u64)page_counter_read(&memcg->swap) * PAGE_SIZE; 7355 } 7356 7357 static int swap_high_show(struct seq_file *m, void *v) 7358 { 7359 return seq_puts_memcg_tunable(m, 7360 READ_ONCE(mem_cgroup_from_seq(m)->swap.high)); 7361 } 7362 7363 static ssize_t swap_high_write(struct kernfs_open_file *of, 7364 char *buf, size_t nbytes, loff_t off) 7365 { 7366 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 7367 unsigned long high; 7368 int err; 7369 7370 buf = strstrip(buf); 7371 err = page_counter_memparse(buf, "max", &high); 7372 if (err) 7373 return err; 7374 7375 page_counter_set_high(&memcg->swap, high); 7376 7377 return nbytes; 7378 } 7379 7380 static int swap_max_show(struct seq_file *m, void *v) 7381 { 7382 return seq_puts_memcg_tunable(m, 7383 READ_ONCE(mem_cgroup_from_seq(m)->swap.max)); 7384 } 7385 7386 static ssize_t swap_max_write(struct kernfs_open_file *of, 7387 char *buf, size_t nbytes, loff_t off) 7388 { 7389 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 7390 unsigned long max; 7391 int err; 7392 7393 buf = strstrip(buf); 7394 err = page_counter_memparse(buf, "max", &max); 7395 if (err) 7396 return err; 7397 7398 xchg(&memcg->swap.max, max); 7399 7400 return nbytes; 7401 } 7402 7403 static int swap_events_show(struct seq_file *m, void *v) 7404 { 7405 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 7406 7407 seq_printf(m, "high %lu\n", 7408 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_HIGH])); 7409 seq_printf(m, "max %lu\n", 7410 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_MAX])); 7411 seq_printf(m, "fail %lu\n", 7412 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_FAIL])); 7413 7414 return 0; 7415 } 7416 7417 static struct cftype swap_files[] = { 7418 { 7419 .name = "swap.current", 7420 .flags = CFTYPE_NOT_ON_ROOT, 7421 .read_u64 = swap_current_read, 7422 }, 7423 { 7424 .name = "swap.high", 7425 .flags = CFTYPE_NOT_ON_ROOT, 7426 .seq_show = swap_high_show, 7427 .write = swap_high_write, 7428 }, 7429 { 7430 .name = "swap.max", 7431 .flags = CFTYPE_NOT_ON_ROOT, 7432 .seq_show = swap_max_show, 7433 .write = swap_max_write, 7434 }, 7435 { 7436 .name = "swap.events", 7437 .flags = CFTYPE_NOT_ON_ROOT, 7438 .file_offset = offsetof(struct mem_cgroup, swap_events_file), 7439 .seq_show = swap_events_show, 7440 }, 7441 { } /* terminate */ 7442 }; 7443 7444 static struct cftype memsw_files[] = { 7445 { 7446 .name = "memsw.usage_in_bytes", 7447 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE), 7448 .read_u64 = mem_cgroup_read_u64, 7449 }, 7450 { 7451 .name = "memsw.max_usage_in_bytes", 7452 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE), 7453 .write = mem_cgroup_reset, 7454 .read_u64 = mem_cgroup_read_u64, 7455 }, 7456 { 7457 .name = "memsw.limit_in_bytes", 7458 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT), 7459 .write = mem_cgroup_write, 7460 .read_u64 = mem_cgroup_read_u64, 7461 }, 7462 { 7463 .name = "memsw.failcnt", 7464 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT), 7465 .write = mem_cgroup_reset, 7466 .read_u64 = mem_cgroup_read_u64, 7467 }, 7468 { }, /* terminate */ 7469 }; 7470 7471 /* 7472 * If mem_cgroup_swap_init() is implemented as a subsys_initcall() 7473 * instead of a core_initcall(), this could mean cgroup_memory_noswap still 7474 * remains set to false even when memcg is disabled via "cgroup_disable=memory" 7475 * boot parameter. This may result in premature OOPS inside 7476 * mem_cgroup_get_nr_swap_pages() function in corner cases. 
7477 */ 7478 static int __init mem_cgroup_swap_init(void) 7479 { 7480 /* No memory control -> no swap control */ 7481 if (mem_cgroup_disabled()) 7482 cgroup_memory_noswap = true; 7483 7484 if (cgroup_memory_noswap) 7485 return 0; 7486 7487 WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys, swap_files)); 7488 WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys, memsw_files)); 7489 7490 return 0; 7491 } 7492 core_initcall(mem_cgroup_swap_init); 7493 7494 #endif /* CONFIG_MEMCG_SWAP */ 7495