1 /* memcontrol.c - Memory Controller 2 * 3 * Copyright IBM Corporation, 2007 4 * Author Balbir Singh <balbir@linux.vnet.ibm.com> 5 * 6 * Copyright 2007 OpenVZ SWsoft Inc 7 * Author: Pavel Emelianov <xemul@openvz.org> 8 * 9 * Memory thresholds 10 * Copyright (C) 2009 Nokia Corporation 11 * Author: Kirill A. Shutemov 12 * 13 * This program is free software; you can redistribute it and/or modify 14 * it under the terms of the GNU General Public License as published by 15 * the Free Software Foundation; either version 2 of the License, or 16 * (at your option) any later version. 17 * 18 * This program is distributed in the hope that it will be useful, 19 * but WITHOUT ANY WARRANTY; without even the implied warranty of 20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 21 * GNU General Public License for more details. 22 */ 23 24 #include <linux/res_counter.h> 25 #include <linux/memcontrol.h> 26 #include <linux/cgroup.h> 27 #include <linux/mm.h> 28 #include <linux/hugetlb.h> 29 #include <linux/pagemap.h> 30 #include <linux/smp.h> 31 #include <linux/page-flags.h> 32 #include <linux/backing-dev.h> 33 #include <linux/bit_spinlock.h> 34 #include <linux/rcupdate.h> 35 #include <linux/limits.h> 36 #include <linux/mutex.h> 37 #include <linux/rbtree.h> 38 #include <linux/slab.h> 39 #include <linux/swap.h> 40 #include <linux/swapops.h> 41 #include <linux/spinlock.h> 42 #include <linux/eventfd.h> 43 #include <linux/sort.h> 44 #include <linux/fs.h> 45 #include <linux/seq_file.h> 46 #include <linux/vmalloc.h> 47 #include <linux/mm_inline.h> 48 #include <linux/page_cgroup.h> 49 #include <linux/cpu.h> 50 #include <linux/oom.h> 51 #include "internal.h" 52 53 #include <asm/uaccess.h> 54 55 #include <trace/events/vmscan.h> 56 57 struct cgroup_subsys mem_cgroup_subsys __read_mostly; 58 #define MEM_CGROUP_RECLAIM_RETRIES 5 59 struct mem_cgroup *root_mem_cgroup __read_mostly; 60 61 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP 62 /* Turned on only when memory cgroup is enabled && really_do_swap_account = 1 */ 63 int do_swap_account __read_mostly; 64 65 /* for remember boot option*/ 66 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP_ENABLED 67 static int really_do_swap_account __initdata = 1; 68 #else 69 static int really_do_swap_account __initdata = 0; 70 #endif 71 72 #else 73 #define do_swap_account (0) 74 #endif 75 76 77 /* 78 * Statistics for memory cgroup. 79 */ 80 enum mem_cgroup_stat_index { 81 /* 82 * For MEM_CONTAINER_TYPE_ALL, usage = pagecache + rss. 83 */ 84 MEM_CGROUP_STAT_CACHE, /* # of pages charged as cache */ 85 MEM_CGROUP_STAT_RSS, /* # of pages charged as anon rss */ 86 MEM_CGROUP_STAT_FILE_MAPPED, /* # of pages charged as file rss */ 87 MEM_CGROUP_STAT_SWAPOUT, /* # of pages, swapped out */ 88 MEM_CGROUP_STAT_DATA, /* end of data requires synchronization */ 89 MEM_CGROUP_ON_MOVE, /* someone is moving account between groups */ 90 MEM_CGROUP_STAT_NSTATS, 91 }; 92 93 enum mem_cgroup_events_index { 94 MEM_CGROUP_EVENTS_PGPGIN, /* # of pages paged in */ 95 MEM_CGROUP_EVENTS_PGPGOUT, /* # of pages paged out */ 96 MEM_CGROUP_EVENTS_COUNT, /* # of pages paged in/out */ 97 MEM_CGROUP_EVENTS_PGFAULT, /* # of page-faults */ 98 MEM_CGROUP_EVENTS_PGMAJFAULT, /* # of major page-faults */ 99 MEM_CGROUP_EVENTS_NSTATS, 100 }; 101 /* 102 * Per memcg event counter is incremented at every pagein/pageout. With THP, 103 * it will be incremated by the number of pages. This counter is used for 104 * for trigger some periodic events. This is straightforward and better 105 * than using jiffies etc. 
to handle periodic memcg event. 106 */ 107 enum mem_cgroup_events_target { 108 MEM_CGROUP_TARGET_THRESH, 109 MEM_CGROUP_TARGET_SOFTLIMIT, 110 MEM_CGROUP_NTARGETS, 111 }; 112 #define THRESHOLDS_EVENTS_TARGET (128) 113 #define SOFTLIMIT_EVENTS_TARGET (1024) 114 115 struct mem_cgroup_stat_cpu { 116 long count[MEM_CGROUP_STAT_NSTATS]; 117 unsigned long events[MEM_CGROUP_EVENTS_NSTATS]; 118 unsigned long targets[MEM_CGROUP_NTARGETS]; 119 }; 120 121 /* 122 * per-zone information in memory controller. 123 */ 124 struct mem_cgroup_per_zone { 125 /* 126 * spin_lock to protect the per cgroup LRU 127 */ 128 struct list_head lists[NR_LRU_LISTS]; 129 unsigned long count[NR_LRU_LISTS]; 130 131 struct zone_reclaim_stat reclaim_stat; 132 struct rb_node tree_node; /* RB tree node */ 133 unsigned long long usage_in_excess;/* Set to the value by which */ 134 /* the soft limit is exceeded*/ 135 bool on_tree; 136 struct mem_cgroup *mem; /* Back pointer, we cannot */ 137 /* use container_of */ 138 }; 139 /* Macro for accessing counter */ 140 #define MEM_CGROUP_ZSTAT(mz, idx) ((mz)->count[(idx)]) 141 142 struct mem_cgroup_per_node { 143 struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES]; 144 }; 145 146 struct mem_cgroup_lru_info { 147 struct mem_cgroup_per_node *nodeinfo[MAX_NUMNODES]; 148 }; 149 150 /* 151 * Cgroups above their limits are maintained in a RB-Tree, independent of 152 * their hierarchy representation 153 */ 154 155 struct mem_cgroup_tree_per_zone { 156 struct rb_root rb_root; 157 spinlock_t lock; 158 }; 159 160 struct mem_cgroup_tree_per_node { 161 struct mem_cgroup_tree_per_zone rb_tree_per_zone[MAX_NR_ZONES]; 162 }; 163 164 struct mem_cgroup_tree { 165 struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES]; 166 }; 167 168 static struct mem_cgroup_tree soft_limit_tree __read_mostly; 169 170 struct mem_cgroup_threshold { 171 struct eventfd_ctx *eventfd; 172 u64 threshold; 173 }; 174 175 /* For threshold */ 176 struct mem_cgroup_threshold_ary { 177 /* An array index points to threshold just below usage. */ 178 int current_threshold; 179 /* Size of entries[] */ 180 unsigned int size; 181 /* Array of thresholds */ 182 struct mem_cgroup_threshold entries[0]; 183 }; 184 185 struct mem_cgroup_thresholds { 186 /* Primary thresholds array */ 187 struct mem_cgroup_threshold_ary *primary; 188 /* 189 * Spare threshold array. 190 * This is needed to make mem_cgroup_unregister_event() "never fail". 191 * It must be able to store at least primary->size - 1 entries. 192 */ 193 struct mem_cgroup_threshold_ary *spare; 194 }; 195 196 /* for OOM */ 197 struct mem_cgroup_eventfd_list { 198 struct list_head list; 199 struct eventfd_ctx *eventfd; 200 }; 201 202 static void mem_cgroup_threshold(struct mem_cgroup *mem); 203 static void mem_cgroup_oom_notify(struct mem_cgroup *mem); 204 205 /* 206 * The memory controller data structure. The memory controller controls both 207 * page cache and RSS per cgroup. We would eventually like to provide 208 * statistics based on the statistics developed by Rik Van Riel for clock-pro, 209 * to help the administrator determine what knobs to tune. 210 * 211 * TODO: Add a water mark for the memory controller. Reclaim will begin when 212 * we hit the water mark. May be even add a low water mark, such that 213 * no reclaim occurs from a cgroup at it's low water mark, this is 214 * a feature that will be implemented much later in the future. 
215 */ 216 struct mem_cgroup { 217 struct cgroup_subsys_state css; 218 /* 219 * the counter to account for memory usage 220 */ 221 struct res_counter res; 222 /* 223 * the counter to account for mem+swap usage. 224 */ 225 struct res_counter memsw; 226 /* 227 * Per cgroup active and inactive list, similar to the 228 * per zone LRU lists. 229 */ 230 struct mem_cgroup_lru_info info; 231 /* 232 * While reclaiming in a hierarchy, we cache the last child we 233 * reclaimed from. 234 */ 235 int last_scanned_child; 236 int last_scanned_node; 237 #if MAX_NUMNODES > 1 238 nodemask_t scan_nodes; 239 unsigned long next_scan_node_update; 240 #endif 241 /* 242 * Should the accounting and control be hierarchical, per subtree? 243 */ 244 bool use_hierarchy; 245 atomic_t oom_lock; 246 atomic_t refcnt; 247 248 unsigned int swappiness; 249 /* OOM-Killer disable */ 250 int oom_kill_disable; 251 252 /* set when res.limit == memsw.limit */ 253 bool memsw_is_minimum; 254 255 /* protect arrays of thresholds */ 256 struct mutex thresholds_lock; 257 258 /* thresholds for memory usage. RCU-protected */ 259 struct mem_cgroup_thresholds thresholds; 260 261 /* thresholds for mem+swap usage. RCU-protected */ 262 struct mem_cgroup_thresholds memsw_thresholds; 263 264 /* For oom notifier event fd */ 265 struct list_head oom_notify; 266 267 /* 268 * Should we move charges of a task when a task is moved into this 269 * mem_cgroup ? And what type of charges should we move ? 270 */ 271 unsigned long move_charge_at_immigrate; 272 /* 273 * percpu counter. 274 */ 275 struct mem_cgroup_stat_cpu *stat; 276 /* 277 * used when a cpu is offlined or other synchronizations 278 * See mem_cgroup_read_stat(). 279 */ 280 struct mem_cgroup_stat_cpu nocpu_base; 281 spinlock_t pcp_counter_lock; 282 }; 283 284 /* Stuffs for move charges at task migration. */ 285 /* 286 * Types of charges to be moved. "move_charge_at_immitgrate" is treated as a 287 * left-shifted bitmap of these types. 288 */ 289 enum move_type { 290 MOVE_CHARGE_TYPE_ANON, /* private anonymous page and swap of it */ 291 MOVE_CHARGE_TYPE_FILE, /* file page(including tmpfs) and swap of it */ 292 NR_MOVE_TYPE, 293 }; 294 295 /* "mc" and its members are protected by cgroup_mutex */ 296 static struct move_charge_struct { 297 spinlock_t lock; /* for from, to */ 298 struct mem_cgroup *from; 299 struct mem_cgroup *to; 300 unsigned long precharge; 301 unsigned long moved_charge; 302 unsigned long moved_swap; 303 struct task_struct *moving_task; /* a task moving charges */ 304 wait_queue_head_t waitq; /* a waitq for other context */ 305 } mc = { 306 .lock = __SPIN_LOCK_UNLOCKED(mc.lock), 307 .waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq), 308 }; 309 310 static bool move_anon(void) 311 { 312 return test_bit(MOVE_CHARGE_TYPE_ANON, 313 &mc.to->move_charge_at_immigrate); 314 } 315 316 static bool move_file(void) 317 { 318 return test_bit(MOVE_CHARGE_TYPE_FILE, 319 &mc.to->move_charge_at_immigrate); 320 } 321 322 /* 323 * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft 324 * limit reclaim to prevent infinite loops, if they ever occur. 
325 */ 326 #define MEM_CGROUP_MAX_RECLAIM_LOOPS (100) 327 #define MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS (2) 328 329 enum charge_type { 330 MEM_CGROUP_CHARGE_TYPE_CACHE = 0, 331 MEM_CGROUP_CHARGE_TYPE_MAPPED, 332 MEM_CGROUP_CHARGE_TYPE_SHMEM, /* used by page migration of shmem */ 333 MEM_CGROUP_CHARGE_TYPE_FORCE, /* used by force_empty */ 334 MEM_CGROUP_CHARGE_TYPE_SWAPOUT, /* for accounting swapcache */ 335 MEM_CGROUP_CHARGE_TYPE_DROP, /* a page was unused swap cache */ 336 NR_CHARGE_TYPE, 337 }; 338 339 /* for encoding cft->private value on file */ 340 #define _MEM (0) 341 #define _MEMSWAP (1) 342 #define _OOM_TYPE (2) 343 #define MEMFILE_PRIVATE(x, val) (((x) << 16) | (val)) 344 #define MEMFILE_TYPE(val) (((val) >> 16) & 0xffff) 345 #define MEMFILE_ATTR(val) ((val) & 0xffff) 346 /* Used for OOM nofiier */ 347 #define OOM_CONTROL (0) 348 349 /* 350 * Reclaim flags for mem_cgroup_hierarchical_reclaim 351 */ 352 #define MEM_CGROUP_RECLAIM_NOSWAP_BIT 0x0 353 #define MEM_CGROUP_RECLAIM_NOSWAP (1 << MEM_CGROUP_RECLAIM_NOSWAP_BIT) 354 #define MEM_CGROUP_RECLAIM_SHRINK_BIT 0x1 355 #define MEM_CGROUP_RECLAIM_SHRINK (1 << MEM_CGROUP_RECLAIM_SHRINK_BIT) 356 #define MEM_CGROUP_RECLAIM_SOFT_BIT 0x2 357 #define MEM_CGROUP_RECLAIM_SOFT (1 << MEM_CGROUP_RECLAIM_SOFT_BIT) 358 359 static void mem_cgroup_get(struct mem_cgroup *mem); 360 static void mem_cgroup_put(struct mem_cgroup *mem); 361 static struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *mem); 362 static void drain_all_stock_async(struct mem_cgroup *mem); 363 364 static struct mem_cgroup_per_zone * 365 mem_cgroup_zoneinfo(struct mem_cgroup *mem, int nid, int zid) 366 { 367 return &mem->info.nodeinfo[nid]->zoneinfo[zid]; 368 } 369 370 struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *mem) 371 { 372 return &mem->css; 373 } 374 375 static struct mem_cgroup_per_zone * 376 page_cgroup_zoneinfo(struct mem_cgroup *mem, struct page *page) 377 { 378 int nid = page_to_nid(page); 379 int zid = page_zonenum(page); 380 381 return mem_cgroup_zoneinfo(mem, nid, zid); 382 } 383 384 static struct mem_cgroup_tree_per_zone * 385 soft_limit_tree_node_zone(int nid, int zid) 386 { 387 return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid]; 388 } 389 390 static struct mem_cgroup_tree_per_zone * 391 soft_limit_tree_from_page(struct page *page) 392 { 393 int nid = page_to_nid(page); 394 int zid = page_zonenum(page); 395 396 return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid]; 397 } 398 399 static void 400 __mem_cgroup_insert_exceeded(struct mem_cgroup *mem, 401 struct mem_cgroup_per_zone *mz, 402 struct mem_cgroup_tree_per_zone *mctz, 403 unsigned long long new_usage_in_excess) 404 { 405 struct rb_node **p = &mctz->rb_root.rb_node; 406 struct rb_node *parent = NULL; 407 struct mem_cgroup_per_zone *mz_node; 408 409 if (mz->on_tree) 410 return; 411 412 mz->usage_in_excess = new_usage_in_excess; 413 if (!mz->usage_in_excess) 414 return; 415 while (*p) { 416 parent = *p; 417 mz_node = rb_entry(parent, struct mem_cgroup_per_zone, 418 tree_node); 419 if (mz->usage_in_excess < mz_node->usage_in_excess) 420 p = &(*p)->rb_left; 421 /* 422 * We can't avoid mem cgroups that are over their soft 423 * limit by the same amount 424 */ 425 else if (mz->usage_in_excess >= mz_node->usage_in_excess) 426 p = &(*p)->rb_right; 427 } 428 rb_link_node(&mz->tree_node, parent, p); 429 rb_insert_color(&mz->tree_node, &mctz->rb_root); 430 mz->on_tree = true; 431 } 432 433 static void 434 __mem_cgroup_remove_exceeded(struct mem_cgroup *mem, 435 
				struct mem_cgroup_per_zone *mz,
				struct mem_cgroup_tree_per_zone *mctz)
{
	if (!mz->on_tree)
		return;
	rb_erase(&mz->tree_node, &mctz->rb_root);
	mz->on_tree = false;
}

static void
mem_cgroup_remove_exceeded(struct mem_cgroup *mem,
				struct mem_cgroup_per_zone *mz,
				struct mem_cgroup_tree_per_zone *mctz)
{
	spin_lock(&mctz->lock);
	__mem_cgroup_remove_exceeded(mem, mz, mctz);
	spin_unlock(&mctz->lock);
}


static void mem_cgroup_update_tree(struct mem_cgroup *mem, struct page *page)
{
	unsigned long long excess;
	struct mem_cgroup_per_zone *mz;
	struct mem_cgroup_tree_per_zone *mctz;
	int nid = page_to_nid(page);
	int zid = page_zonenum(page);
	mctz = soft_limit_tree_from_page(page);

	/*
	 * Necessary to update all ancestors when hierarchy is used,
	 * because their event counter is not touched.
	 */
	for (; mem; mem = parent_mem_cgroup(mem)) {
		mz = mem_cgroup_zoneinfo(mem, nid, zid);
		excess = res_counter_soft_limit_excess(&mem->res);
		/*
		 * We have to update the tree if mz is on RB-tree or
		 * mem is over its softlimit.
		 */
		if (excess || mz->on_tree) {
			spin_lock(&mctz->lock);
			/* if on-tree, remove it */
			if (mz->on_tree)
				__mem_cgroup_remove_exceeded(mem, mz, mctz);
			/*
			 * Insert again. mz->usage_in_excess will be updated.
			 * If excess is 0, no tree ops.
			 */
			__mem_cgroup_insert_exceeded(mem, mz, mctz, excess);
			spin_unlock(&mctz->lock);
		}
	}
}

static void mem_cgroup_remove_from_trees(struct mem_cgroup *mem)
{
	int node, zone;
	struct mem_cgroup_per_zone *mz;
	struct mem_cgroup_tree_per_zone *mctz;

	for_each_node_state(node, N_POSSIBLE) {
		for (zone = 0; zone < MAX_NR_ZONES; zone++) {
			mz = mem_cgroup_zoneinfo(mem, node, zone);
			mctz = soft_limit_tree_node_zone(node, zone);
			mem_cgroup_remove_exceeded(mem, mz, mctz);
		}
	}
}

static struct mem_cgroup_per_zone *
__mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
{
	struct rb_node *rightmost = NULL;
	struct mem_cgroup_per_zone *mz;

retry:
	mz = NULL;
	rightmost = rb_last(&mctz->rb_root);
	if (!rightmost)
		goto done;		/* Nothing to reclaim from */

	mz = rb_entry(rightmost, struct mem_cgroup_per_zone, tree_node);
	/*
	 * Remove the node now but someone else can add it back,
	 * we will add it back at the end of reclaim to its correct
	 * position in the tree.
	 */
	__mem_cgroup_remove_exceeded(mz->mem, mz, mctz);
	if (!res_counter_soft_limit_excess(&mz->mem->res) ||
		!css_tryget(&mz->mem->css))
		goto retry;
done:
	return mz;
}

static struct mem_cgroup_per_zone *
mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
{
	struct mem_cgroup_per_zone *mz;

	spin_lock(&mctz->lock);
	mz = __mem_cgroup_largest_soft_limit_node(mctz);
	spin_unlock(&mctz->lock);
	return mz;
}
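/*
 * Illustrative sketch (not part of the original flow, not called anywhere):
 * how a caller could peek at the worst soft-limit offender for one node/zone
 * without removing it from the tree. The tree is keyed by usage_in_excess,
 * so the rightmost node is always the cgroup exceeding its soft limit by the
 * largest amount. A real user would also css_tryget() the returned memcg.
 */
static inline struct mem_cgroup *
mem_cgroup_peek_largest_soft_limit(int nid, int zid)
{
	struct mem_cgroup_tree_per_zone *mctz = soft_limit_tree_node_zone(nid, zid);
	struct mem_cgroup_per_zone *mz;
	struct rb_node *rightmost;
	struct mem_cgroup *mem = NULL;

	spin_lock(&mctz->lock);
	rightmost = rb_last(&mctz->rb_root);
	if (rightmost) {
		mz = rb_entry(rightmost, struct mem_cgroup_per_zone, tree_node);
		mem = mz->mem;	/* no reference taken; illustration only */
	}
	spin_unlock(&mctz->lock);
	return mem;
}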
/*
 * Implementation Note: reading percpu statistics for memcg.
 *
 * Both vmstat[] and percpu_counter use a threshold and do periodic
 * synchronization to implement a "quick" read. There is a trade-off between
 * reading cost and precision of the value, so we may eventually implement
 * a periodic synchronization of the counters in memcg.
 *
 * But this _read() function is currently used for the user interface. The
 * user accounts memory usage by memory cgroup and _always_ requires an exact
 * value. Even if we provided a quick-and-fuzzy read, we would still have to
 * visit all online cpus and compute the sum. So, for now, unnecessary
 * synchronization is not implemented (it is only implemented for cpu hotplug).
 *
 * If kernel-internal users that can tolerate an inexact value appear, and
 * reading all cpu values becomes a performance bottleneck in some common
 * workload, thresholds and synchronization as in vmstat[] should be
 * implemented.
 */
static long mem_cgroup_read_stat(struct mem_cgroup *mem,
				 enum mem_cgroup_stat_index idx)
{
	long val = 0;
	int cpu;

	get_online_cpus();
	for_each_online_cpu(cpu)
		val += per_cpu(mem->stat->count[idx], cpu);
#ifdef CONFIG_HOTPLUG_CPU
	spin_lock(&mem->pcp_counter_lock);
	val += mem->nocpu_base.count[idx];
	spin_unlock(&mem->pcp_counter_lock);
#endif
	put_online_cpus();
	return val;
}

static long mem_cgroup_local_usage(struct mem_cgroup *mem)
{
	long ret;

	ret = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_RSS);
	ret += mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_CACHE);
	return ret;
}

static void mem_cgroup_swap_statistics(struct mem_cgroup *mem,
					 bool charge)
{
	int val = (charge) ? 1 : -1;
	this_cpu_add(mem->stat->count[MEM_CGROUP_STAT_SWAPOUT], val);
}

void mem_cgroup_pgfault(struct mem_cgroup *mem, int val)
{
	this_cpu_add(mem->stat->events[MEM_CGROUP_EVENTS_PGFAULT], val);
}

void mem_cgroup_pgmajfault(struct mem_cgroup *mem, int val)
{
	this_cpu_add(mem->stat->events[MEM_CGROUP_EVENTS_PGMAJFAULT], val);
}

static unsigned long mem_cgroup_read_events(struct mem_cgroup *mem,
					    enum mem_cgroup_events_index idx)
{
	unsigned long val = 0;
	int cpu;

	for_each_online_cpu(cpu)
		val += per_cpu(mem->stat->events[idx], cpu);
#ifdef CONFIG_HOTPLUG_CPU
	spin_lock(&mem->pcp_counter_lock);
	val += mem->nocpu_base.events[idx];
	spin_unlock(&mem->pcp_counter_lock);
#endif
	return val;
}

static void mem_cgroup_charge_statistics(struct mem_cgroup *mem,
					 bool file, int nr_pages)
{
	preempt_disable();

	if (file)
		__this_cpu_add(mem->stat->count[MEM_CGROUP_STAT_CACHE], nr_pages);
	else
		__this_cpu_add(mem->stat->count[MEM_CGROUP_STAT_RSS], nr_pages);

	/* pagein of a big page is an event.
So, ignore page size */ 632 if (nr_pages > 0) 633 __this_cpu_inc(mem->stat->events[MEM_CGROUP_EVENTS_PGPGIN]); 634 else { 635 __this_cpu_inc(mem->stat->events[MEM_CGROUP_EVENTS_PGPGOUT]); 636 nr_pages = -nr_pages; /* for event */ 637 } 638 639 __this_cpu_add(mem->stat->events[MEM_CGROUP_EVENTS_COUNT], nr_pages); 640 641 preempt_enable(); 642 } 643 644 static unsigned long 645 mem_cgroup_get_zonestat_node(struct mem_cgroup *mem, int nid, enum lru_list idx) 646 { 647 struct mem_cgroup_per_zone *mz; 648 u64 total = 0; 649 int zid; 650 651 for (zid = 0; zid < MAX_NR_ZONES; zid++) { 652 mz = mem_cgroup_zoneinfo(mem, nid, zid); 653 total += MEM_CGROUP_ZSTAT(mz, idx); 654 } 655 return total; 656 } 657 static unsigned long mem_cgroup_get_local_zonestat(struct mem_cgroup *mem, 658 enum lru_list idx) 659 { 660 int nid; 661 u64 total = 0; 662 663 for_each_online_node(nid) 664 total += mem_cgroup_get_zonestat_node(mem, nid, idx); 665 return total; 666 } 667 668 static bool __memcg_event_check(struct mem_cgroup *mem, int target) 669 { 670 unsigned long val, next; 671 672 val = this_cpu_read(mem->stat->events[MEM_CGROUP_EVENTS_COUNT]); 673 next = this_cpu_read(mem->stat->targets[target]); 674 /* from time_after() in jiffies.h */ 675 return ((long)next - (long)val < 0); 676 } 677 678 static void __mem_cgroup_target_update(struct mem_cgroup *mem, int target) 679 { 680 unsigned long val, next; 681 682 val = this_cpu_read(mem->stat->events[MEM_CGROUP_EVENTS_COUNT]); 683 684 switch (target) { 685 case MEM_CGROUP_TARGET_THRESH: 686 next = val + THRESHOLDS_EVENTS_TARGET; 687 break; 688 case MEM_CGROUP_TARGET_SOFTLIMIT: 689 next = val + SOFTLIMIT_EVENTS_TARGET; 690 break; 691 default: 692 return; 693 } 694 695 this_cpu_write(mem->stat->targets[target], next); 696 } 697 698 /* 699 * Check events in order. 700 * 701 */ 702 static void memcg_check_events(struct mem_cgroup *mem, struct page *page) 703 { 704 /* threshold event is triggered in finer grain than soft limit */ 705 if (unlikely(__memcg_event_check(mem, MEM_CGROUP_TARGET_THRESH))) { 706 mem_cgroup_threshold(mem); 707 __mem_cgroup_target_update(mem, MEM_CGROUP_TARGET_THRESH); 708 if (unlikely(__memcg_event_check(mem, 709 MEM_CGROUP_TARGET_SOFTLIMIT))){ 710 mem_cgroup_update_tree(mem, page); 711 __mem_cgroup_target_update(mem, 712 MEM_CGROUP_TARGET_SOFTLIMIT); 713 } 714 } 715 } 716 717 static struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont) 718 { 719 return container_of(cgroup_subsys_state(cont, 720 mem_cgroup_subsys_id), struct mem_cgroup, 721 css); 722 } 723 724 struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p) 725 { 726 /* 727 * mm_update_next_owner() may clear mm->owner to NULL 728 * if it races with swapoff, page migration, etc. 729 * So this can be called with p == NULL. 730 */ 731 if (unlikely(!p)) 732 return NULL; 733 734 return container_of(task_subsys_state(p, mem_cgroup_subsys_id), 735 struct mem_cgroup, css); 736 } 737 738 struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm) 739 { 740 struct mem_cgroup *mem = NULL; 741 742 if (!mm) 743 return NULL; 744 /* 745 * Because we have no locks, mm->owner's may be being moved to other 746 * cgroup. We use css_tryget() here even if this looks 747 * pessimistic (rather than adding locks here). 
748 */ 749 rcu_read_lock(); 750 do { 751 mem = mem_cgroup_from_task(rcu_dereference(mm->owner)); 752 if (unlikely(!mem)) 753 break; 754 } while (!css_tryget(&mem->css)); 755 rcu_read_unlock(); 756 return mem; 757 } 758 759 /* The caller has to guarantee "mem" exists before calling this */ 760 static struct mem_cgroup *mem_cgroup_start_loop(struct mem_cgroup *mem) 761 { 762 struct cgroup_subsys_state *css; 763 int found; 764 765 if (!mem) /* ROOT cgroup has the smallest ID */ 766 return root_mem_cgroup; /*css_put/get against root is ignored*/ 767 if (!mem->use_hierarchy) { 768 if (css_tryget(&mem->css)) 769 return mem; 770 return NULL; 771 } 772 rcu_read_lock(); 773 /* 774 * searching a memory cgroup which has the smallest ID under given 775 * ROOT cgroup. (ID >= 1) 776 */ 777 css = css_get_next(&mem_cgroup_subsys, 1, &mem->css, &found); 778 if (css && css_tryget(css)) 779 mem = container_of(css, struct mem_cgroup, css); 780 else 781 mem = NULL; 782 rcu_read_unlock(); 783 return mem; 784 } 785 786 static struct mem_cgroup *mem_cgroup_get_next(struct mem_cgroup *iter, 787 struct mem_cgroup *root, 788 bool cond) 789 { 790 int nextid = css_id(&iter->css) + 1; 791 int found; 792 int hierarchy_used; 793 struct cgroup_subsys_state *css; 794 795 hierarchy_used = iter->use_hierarchy; 796 797 css_put(&iter->css); 798 /* If no ROOT, walk all, ignore hierarchy */ 799 if (!cond || (root && !hierarchy_used)) 800 return NULL; 801 802 if (!root) 803 root = root_mem_cgroup; 804 805 do { 806 iter = NULL; 807 rcu_read_lock(); 808 809 css = css_get_next(&mem_cgroup_subsys, nextid, 810 &root->css, &found); 811 if (css && css_tryget(css)) 812 iter = container_of(css, struct mem_cgroup, css); 813 rcu_read_unlock(); 814 /* If css is NULL, no more cgroups will be found */ 815 nextid = found + 1; 816 } while (css && !iter); 817 818 return iter; 819 } 820 /* 821 * for_eacn_mem_cgroup_tree() for visiting all cgroup under tree. Please 822 * be careful that "break" loop is not allowed. We have reference count. 823 * Instead of that modify "cond" to be false and "continue" to exit the loop. 824 */ 825 #define for_each_mem_cgroup_tree_cond(iter, root, cond) \ 826 for (iter = mem_cgroup_start_loop(root);\ 827 iter != NULL;\ 828 iter = mem_cgroup_get_next(iter, root, cond)) 829 830 #define for_each_mem_cgroup_tree(iter, root) \ 831 for_each_mem_cgroup_tree_cond(iter, root, true) 832 833 #define for_each_mem_cgroup_all(iter) \ 834 for_each_mem_cgroup_tree_cond(iter, NULL, true) 835 836 837 static inline bool mem_cgroup_is_root(struct mem_cgroup *mem) 838 { 839 return (mem == root_mem_cgroup); 840 } 841 842 void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx) 843 { 844 struct mem_cgroup *mem; 845 846 if (!mm) 847 return; 848 849 rcu_read_lock(); 850 mem = mem_cgroup_from_task(rcu_dereference(mm->owner)); 851 if (unlikely(!mem)) 852 goto out; 853 854 switch (idx) { 855 case PGMAJFAULT: 856 mem_cgroup_pgmajfault(mem, 1); 857 break; 858 case PGFAULT: 859 mem_cgroup_pgfault(mem, 1); 860 break; 861 default: 862 BUG(); 863 } 864 out: 865 rcu_read_unlock(); 866 } 867 EXPORT_SYMBOL(mem_cgroup_count_vm_event); 868 869 /* 870 * Following LRU functions are allowed to be used without PCG_LOCK. 871 * Operations are called by routine of global LRU independently from memcg. 872 * What we have to take care of here is validness of pc->mem_cgroup. 873 * 874 * Changes to pc->mem_cgroup happens when 875 * 1. charge 876 * 2. moving account 877 * In typical case, "charge" is done before add-to-lru. Exception is SwapCache. 
 * It is added to LRU before charge.
 * If PCG_USED bit is not set, page_cgroup is not added to this private LRU.
 * When moving account, the page is not on LRU. It's isolated.
 */

void mem_cgroup_del_lru_list(struct page *page, enum lru_list lru)
{
	struct page_cgroup *pc;
	struct mem_cgroup_per_zone *mz;

	if (mem_cgroup_disabled())
		return;
	pc = lookup_page_cgroup(page);
	/* can happen while we handle swapcache. */
	if (!TestClearPageCgroupAcctLRU(pc))
		return;
	VM_BUG_ON(!pc->mem_cgroup);
	/*
	 * We don't check PCG_USED bit. It's cleared when the "page" is finally
	 * removed from global LRU.
	 */
	mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
	/* huge page split is done under lru_lock. so, we have no races. */
	MEM_CGROUP_ZSTAT(mz, lru) -= 1 << compound_order(page);
	if (mem_cgroup_is_root(pc->mem_cgroup))
		return;
	VM_BUG_ON(list_empty(&pc->lru));
	list_del_init(&pc->lru);
}

void mem_cgroup_del_lru(struct page *page)
{
	mem_cgroup_del_lru_list(page, page_lru(page));
}

/*
 * Writeback is about to end against a page which has been marked for immediate
 * reclaim. If it still appears to be reclaimable, move it to the tail of the
 * inactive list.
 */
void mem_cgroup_rotate_reclaimable_page(struct page *page)
{
	struct mem_cgroup_per_zone *mz;
	struct page_cgroup *pc;
	enum lru_list lru = page_lru(page);

	if (mem_cgroup_disabled())
		return;

	pc = lookup_page_cgroup(page);
	/* unused or root page is not rotated. */
	if (!PageCgroupUsed(pc))
		return;
	/* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
	smp_rmb();
	if (mem_cgroup_is_root(pc->mem_cgroup))
		return;
	mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
	list_move_tail(&pc->lru, &mz->lists[lru]);
}

void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru)
{
	struct mem_cgroup_per_zone *mz;
	struct page_cgroup *pc;

	if (mem_cgroup_disabled())
		return;

	pc = lookup_page_cgroup(page);
	/* unused or root page is not rotated. */
	if (!PageCgroupUsed(pc))
		return;
	/* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
	smp_rmb();
	if (mem_cgroup_is_root(pc->mem_cgroup))
		return;
	mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
	list_move(&pc->lru, &mz->lists[lru]);
}

void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru)
{
	struct page_cgroup *pc;
	struct mem_cgroup_per_zone *mz;

	if (mem_cgroup_disabled())
		return;
	pc = lookup_page_cgroup(page);
	VM_BUG_ON(PageCgroupAcctLRU(pc));
	if (!PageCgroupUsed(pc))
		return;
	/* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
	smp_rmb();
	mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
	/* huge page split is done under lru_lock. so, we have no races. */
	MEM_CGROUP_ZSTAT(mz, lru) += 1 << compound_order(page);
	SetPageCgroupAcctLRU(pc);
	if (mem_cgroup_is_root(pc->mem_cgroup))
		return;
	list_add(&pc->lru, &mz->lists[lru]);
}
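/*
 * Illustrative sketch (not called anywhere) of the store side that the
 * smp_rmb() calls above are expected to pair with: the charge-commit path,
 * later in this file, publishes pc->mem_cgroup before setting the USED bit,
 * roughly in the order shown below.
 */
static inline void __page_cgroup_publish_example(struct page_cgroup *pc,
						 struct mem_cgroup *mem)
{
	pc->mem_cgroup = mem;
	/*
	 * Pairs with smp_rmb() after the PageCgroupUsed() checks: a reader
	 * that observes the USED bit must also observe pc->mem_cgroup.
	 */
	smp_wmb();
	SetPageCgroupUsed(pc);
}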
/*
 * At handling SwapCache and other FUSE stuff, pc->mem_cgroup may be changed
 * while it's linked to lru because the page may be reused after it's fully
 * uncharged. To handle that, unlink page_cgroup from LRU when charge it again.
 * It's done under lock_page and expected that zone->lru_lock is never held.
 */
static void mem_cgroup_lru_del_before_commit(struct page *page)
{
	unsigned long flags;
	struct zone *zone = page_zone(page);
	struct page_cgroup *pc = lookup_page_cgroup(page);

	/*
	 * Doing this check without taking ->lru_lock seems wrong but this
	 * is safe. Because if page_cgroup's USED bit is unset, the page
	 * will not be added to any memcg's LRU. If page_cgroup's USED bit is
	 * set, the commit after this will fail, anyway.
	 * All of charge/uncharge is done under some mutual exclusion,
	 * so we don't need to take care of changes in the USED bit.
	 */
	if (likely(!PageLRU(page)))
		return;

	spin_lock_irqsave(&zone->lru_lock, flags);
	/*
	 * Forget old LRU when this page_cgroup is *not* used. This Used bit
	 * is guarded by lock_page() because the page is SwapCache.
	 */
	if (!PageCgroupUsed(pc))
		mem_cgroup_del_lru_list(page, page_lru(page));
	spin_unlock_irqrestore(&zone->lru_lock, flags);
}

static void mem_cgroup_lru_add_after_commit(struct page *page)
{
	unsigned long flags;
	struct zone *zone = page_zone(page);
	struct page_cgroup *pc = lookup_page_cgroup(page);

	/* taking care of that the page is added to LRU while we commit it */
	if (likely(!PageLRU(page)))
		return;
	spin_lock_irqsave(&zone->lru_lock, flags);
	/* link when the page is linked to LRU but page_cgroup isn't */
	if (PageLRU(page) && !PageCgroupAcctLRU(pc))
		mem_cgroup_add_lru_list(page, page_lru(page));
	spin_unlock_irqrestore(&zone->lru_lock, flags);
}


void mem_cgroup_move_lists(struct page *page,
			   enum lru_list from, enum lru_list to)
{
	if (mem_cgroup_disabled())
		return;
	mem_cgroup_del_lru_list(page, from);
	mem_cgroup_add_lru_list(page, to);
}

int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem)
{
	int ret;
	struct mem_cgroup *curr = NULL;
	struct task_struct *p;

	p = find_lock_task_mm(task);
	if (!p)
		return 0;
	curr = try_get_mem_cgroup_from_mm(p->mm);
	task_unlock(p);
	if (!curr)
		return 0;
	/*
	 * We should check use_hierarchy of "mem", not "curr". Checking
	 * use_hierarchy of "curr" here would make this function return true
	 * whenever hierarchy is enabled in "curr" and "curr" is a child of
	 * "mem" in the *cgroup* hierarchy (even if use_hierarchy is disabled
	 * in "mem").
1058 */ 1059 if (mem->use_hierarchy) 1060 ret = css_is_ancestor(&curr->css, &mem->css); 1061 else 1062 ret = (curr == mem); 1063 css_put(&curr->css); 1064 return ret; 1065 } 1066 1067 static int calc_inactive_ratio(struct mem_cgroup *memcg, unsigned long *present_pages) 1068 { 1069 unsigned long active; 1070 unsigned long inactive; 1071 unsigned long gb; 1072 unsigned long inactive_ratio; 1073 1074 inactive = mem_cgroup_get_local_zonestat(memcg, LRU_INACTIVE_ANON); 1075 active = mem_cgroup_get_local_zonestat(memcg, LRU_ACTIVE_ANON); 1076 1077 gb = (inactive + active) >> (30 - PAGE_SHIFT); 1078 if (gb) 1079 inactive_ratio = int_sqrt(10 * gb); 1080 else 1081 inactive_ratio = 1; 1082 1083 if (present_pages) { 1084 present_pages[0] = inactive; 1085 present_pages[1] = active; 1086 } 1087 1088 return inactive_ratio; 1089 } 1090 1091 int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg) 1092 { 1093 unsigned long active; 1094 unsigned long inactive; 1095 unsigned long present_pages[2]; 1096 unsigned long inactive_ratio; 1097 1098 inactive_ratio = calc_inactive_ratio(memcg, present_pages); 1099 1100 inactive = present_pages[0]; 1101 active = present_pages[1]; 1102 1103 if (inactive * inactive_ratio < active) 1104 return 1; 1105 1106 return 0; 1107 } 1108 1109 int mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg) 1110 { 1111 unsigned long active; 1112 unsigned long inactive; 1113 1114 inactive = mem_cgroup_get_local_zonestat(memcg, LRU_INACTIVE_FILE); 1115 active = mem_cgroup_get_local_zonestat(memcg, LRU_ACTIVE_FILE); 1116 1117 return (active > inactive); 1118 } 1119 1120 unsigned long mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg, 1121 struct zone *zone, 1122 enum lru_list lru) 1123 { 1124 int nid = zone_to_nid(zone); 1125 int zid = zone_idx(zone); 1126 struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(memcg, nid, zid); 1127 1128 return MEM_CGROUP_ZSTAT(mz, lru); 1129 } 1130 1131 #ifdef CONFIG_NUMA 1132 static unsigned long mem_cgroup_node_nr_file_lru_pages(struct mem_cgroup *memcg, 1133 int nid) 1134 { 1135 unsigned long ret; 1136 1137 ret = mem_cgroup_get_zonestat_node(memcg, nid, LRU_INACTIVE_FILE) + 1138 mem_cgroup_get_zonestat_node(memcg, nid, LRU_ACTIVE_FILE); 1139 1140 return ret; 1141 } 1142 1143 static unsigned long mem_cgroup_nr_file_lru_pages(struct mem_cgroup *memcg) 1144 { 1145 u64 total = 0; 1146 int nid; 1147 1148 for_each_node_state(nid, N_HIGH_MEMORY) 1149 total += mem_cgroup_node_nr_file_lru_pages(memcg, nid); 1150 1151 return total; 1152 } 1153 1154 static unsigned long mem_cgroup_node_nr_anon_lru_pages(struct mem_cgroup *memcg, 1155 int nid) 1156 { 1157 unsigned long ret; 1158 1159 ret = mem_cgroup_get_zonestat_node(memcg, nid, LRU_INACTIVE_ANON) + 1160 mem_cgroup_get_zonestat_node(memcg, nid, LRU_ACTIVE_ANON); 1161 1162 return ret; 1163 } 1164 1165 static unsigned long mem_cgroup_nr_anon_lru_pages(struct mem_cgroup *memcg) 1166 { 1167 u64 total = 0; 1168 int nid; 1169 1170 for_each_node_state(nid, N_HIGH_MEMORY) 1171 total += mem_cgroup_node_nr_anon_lru_pages(memcg, nid); 1172 1173 return total; 1174 } 1175 1176 static unsigned long 1177 mem_cgroup_node_nr_unevictable_lru_pages(struct mem_cgroup *memcg, int nid) 1178 { 1179 return mem_cgroup_get_zonestat_node(memcg, nid, LRU_UNEVICTABLE); 1180 } 1181 1182 static unsigned long 1183 mem_cgroup_nr_unevictable_lru_pages(struct mem_cgroup *memcg) 1184 { 1185 u64 total = 0; 1186 int nid; 1187 1188 for_each_node_state(nid, N_HIGH_MEMORY) 1189 total += mem_cgroup_node_nr_unevictable_lru_pages(memcg, nid); 
1190 1191 return total; 1192 } 1193 1194 static unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg, 1195 int nid) 1196 { 1197 enum lru_list l; 1198 u64 total = 0; 1199 1200 for_each_lru(l) 1201 total += mem_cgroup_get_zonestat_node(memcg, nid, l); 1202 1203 return total; 1204 } 1205 1206 static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg) 1207 { 1208 u64 total = 0; 1209 int nid; 1210 1211 for_each_node_state(nid, N_HIGH_MEMORY) 1212 total += mem_cgroup_node_nr_lru_pages(memcg, nid); 1213 1214 return total; 1215 } 1216 #endif /* CONFIG_NUMA */ 1217 1218 struct zone_reclaim_stat *mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg, 1219 struct zone *zone) 1220 { 1221 int nid = zone_to_nid(zone); 1222 int zid = zone_idx(zone); 1223 struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(memcg, nid, zid); 1224 1225 return &mz->reclaim_stat; 1226 } 1227 1228 struct zone_reclaim_stat * 1229 mem_cgroup_get_reclaim_stat_from_page(struct page *page) 1230 { 1231 struct page_cgroup *pc; 1232 struct mem_cgroup_per_zone *mz; 1233 1234 if (mem_cgroup_disabled()) 1235 return NULL; 1236 1237 pc = lookup_page_cgroup(page); 1238 if (!PageCgroupUsed(pc)) 1239 return NULL; 1240 /* Ensure pc->mem_cgroup is visible after reading PCG_USED. */ 1241 smp_rmb(); 1242 mz = page_cgroup_zoneinfo(pc->mem_cgroup, page); 1243 return &mz->reclaim_stat; 1244 } 1245 1246 unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan, 1247 struct list_head *dst, 1248 unsigned long *scanned, int order, 1249 int mode, struct zone *z, 1250 struct mem_cgroup *mem_cont, 1251 int active, int file) 1252 { 1253 unsigned long nr_taken = 0; 1254 struct page *page; 1255 unsigned long scan; 1256 LIST_HEAD(pc_list); 1257 struct list_head *src; 1258 struct page_cgroup *pc, *tmp; 1259 int nid = zone_to_nid(z); 1260 int zid = zone_idx(z); 1261 struct mem_cgroup_per_zone *mz; 1262 int lru = LRU_FILE * file + active; 1263 int ret; 1264 1265 BUG_ON(!mem_cont); 1266 mz = mem_cgroup_zoneinfo(mem_cont, nid, zid); 1267 src = &mz->lists[lru]; 1268 1269 scan = 0; 1270 list_for_each_entry_safe_reverse(pc, tmp, src, lru) { 1271 if (scan >= nr_to_scan) 1272 break; 1273 1274 if (unlikely(!PageCgroupUsed(pc))) 1275 continue; 1276 1277 page = lookup_cgroup_page(pc); 1278 1279 if (unlikely(!PageLRU(page))) 1280 continue; 1281 1282 scan++; 1283 ret = __isolate_lru_page(page, mode, file); 1284 switch (ret) { 1285 case 0: 1286 list_move(&page->lru, dst); 1287 mem_cgroup_del_lru(page); 1288 nr_taken += hpage_nr_pages(page); 1289 break; 1290 case -EBUSY: 1291 /* we don't affect global LRU but rotate in our LRU */ 1292 mem_cgroup_rotate_lru_list(page, page_lru(page)); 1293 break; 1294 default: 1295 break; 1296 } 1297 } 1298 1299 *scanned = scan; 1300 1301 trace_mm_vmscan_memcg_isolate(0, nr_to_scan, scan, nr_taken, 1302 0, 0, 0, mode); 1303 1304 return nr_taken; 1305 } 1306 1307 #define mem_cgroup_from_res_counter(counter, member) \ 1308 container_of(counter, struct mem_cgroup, member) 1309 1310 /** 1311 * mem_cgroup_margin - calculate chargeable space of a memory cgroup 1312 * @mem: the memory cgroup 1313 * 1314 * Returns the maximum amount of memory @mem can be charged with, in 1315 * pages. 
 */
static unsigned long mem_cgroup_margin(struct mem_cgroup *mem)
{
	unsigned long long margin;

	margin = res_counter_margin(&mem->res);
	if (do_swap_account)
		margin = min(margin, res_counter_margin(&mem->memsw));
	return margin >> PAGE_SHIFT;
}

static unsigned int get_swappiness(struct mem_cgroup *memcg)
{
	struct cgroup *cgrp = memcg->css.cgroup;

	/* root ? */
	if (cgrp->parent == NULL)
		return vm_swappiness;

	return memcg->swappiness;
}

static void mem_cgroup_start_move(struct mem_cgroup *mem)
{
	int cpu;

	get_online_cpus();
	spin_lock(&mem->pcp_counter_lock);
	for_each_online_cpu(cpu)
		per_cpu(mem->stat->count[MEM_CGROUP_ON_MOVE], cpu) += 1;
	mem->nocpu_base.count[MEM_CGROUP_ON_MOVE] += 1;
	spin_unlock(&mem->pcp_counter_lock);
	put_online_cpus();

	synchronize_rcu();
}

static void mem_cgroup_end_move(struct mem_cgroup *mem)
{
	int cpu;

	if (!mem)
		return;
	get_online_cpus();
	spin_lock(&mem->pcp_counter_lock);
	for_each_online_cpu(cpu)
		per_cpu(mem->stat->count[MEM_CGROUP_ON_MOVE], cpu) -= 1;
	mem->nocpu_base.count[MEM_CGROUP_ON_MOVE] -= 1;
	spin_unlock(&mem->pcp_counter_lock);
	put_online_cpus();
}
/*
 * 2 routines for checking "mem" is under move_account() or not.
 *
 * mem_cgroup_stealed() - checking a cgroup is mc.from or not. This is used
 *			  for avoiding race in accounting. If true,
 *			  pc->mem_cgroup may be overwritten.
 *
 * mem_cgroup_under_move() - checking a cgroup is mc.from or mc.to or
 *			  under hierarchy of moving cgroups. This is for
 *			  waiting at high memory pressure caused by "move".
 */

static bool mem_cgroup_stealed(struct mem_cgroup *mem)
{
	VM_BUG_ON(!rcu_read_lock_held());
	return this_cpu_read(mem->stat->count[MEM_CGROUP_ON_MOVE]) > 0;
}

static bool mem_cgroup_under_move(struct mem_cgroup *mem)
{
	struct mem_cgroup *from;
	struct mem_cgroup *to;
	bool ret = false;
	/*
	 * Unlike task_move routines, we access mc.to, mc.from not under
	 * mutual exclusion by cgroup_mutex. Here, we take spinlock instead.
	 */
	spin_lock(&mc.lock);
	from = mc.from;
	to = mc.to;
	if (!from)
		goto unlock;
	if (from == mem || to == mem
	    || (mem->use_hierarchy && css_is_ancestor(&from->css, &mem->css))
	    || (mem->use_hierarchy && css_is_ancestor(&to->css, &mem->css)))
		ret = true;
unlock:
	spin_unlock(&mc.lock);
	return ret;
}

static bool mem_cgroup_wait_acct_move(struct mem_cgroup *mem)
{
	if (mc.moving_task && current != mc.moving_task) {
		if (mem_cgroup_under_move(mem)) {
			DEFINE_WAIT(wait);
			prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE);
			/* moving charge context might have finished. */
			if (mc.moving_task)
				schedule();
			finish_wait(&mc.waitq, &wait);
			return true;
		}
	}
	return false;
}
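/*
 * Example (illustrative only, not called anywhere): how a charging path can
 * use the margin to decide whether a batched charge of @nr_pages can succeed
 * without reclaim. The real consumer is the try_charge path later in this
 * file; this helper is just a sketch.
 */
static inline bool mem_cgroup_may_batch_charge(struct mem_cgroup *mem,
					       unsigned long nr_pages)
{
	/*
	 * mem_cgroup_margin() already reports the tighter of the res and
	 * memsw counters (in pages) when swap accounting is enabled.
	 */
	return mem_cgroup_margin(mem) >= nr_pages;
}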
/**
 * mem_cgroup_print_oom_info: Called from OOM with tasklist_lock held in read mode.
 * @memcg: The memory cgroup that went over limit
 * @p: Task that is going to be killed
 *
 * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
 * enabled
 */
void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
{
	struct cgroup *task_cgrp;
	struct cgroup *mem_cgrp;
	/*
	 * Need a buffer in BSS, can't rely on allocations. The code relies
	 * on the assumption that OOM is serialized for memory controller.
	 * If this assumption is broken, revisit this code.
	 */
	static char memcg_name[PATH_MAX];
	int ret;

	if (!memcg || !p)
		return;


	rcu_read_lock();

	mem_cgrp = memcg->css.cgroup;
	task_cgrp = task_cgroup(p, mem_cgroup_subsys_id);

	ret = cgroup_path(task_cgrp, memcg_name, PATH_MAX);
	if (ret < 0) {
		/*
		 * Unfortunately, we are unable to convert to a useful name,
		 * but we'll still print out the usage information.
		 */
		rcu_read_unlock();
		goto done;
	}
	rcu_read_unlock();

	printk(KERN_INFO "Task in %s killed", memcg_name);

	rcu_read_lock();
	ret = cgroup_path(mem_cgrp, memcg_name, PATH_MAX);
	if (ret < 0) {
		rcu_read_unlock();
		goto done;
	}
	rcu_read_unlock();

	/*
	 * Continues from above, so we don't need a KERN_ level
	 */
	printk(KERN_CONT " as a result of limit of %s\n", memcg_name);
done:

	printk(KERN_INFO "memory: usage %llukB, limit %llukB, failcnt %llu\n",
		res_counter_read_u64(&memcg->res, RES_USAGE) >> 10,
		res_counter_read_u64(&memcg->res, RES_LIMIT) >> 10,
		res_counter_read_u64(&memcg->res, RES_FAILCNT));
	printk(KERN_INFO "memory+swap: usage %llukB, limit %llukB, "
		"failcnt %llu\n",
		res_counter_read_u64(&memcg->memsw, RES_USAGE) >> 10,
		res_counter_read_u64(&memcg->memsw, RES_LIMIT) >> 10,
		res_counter_read_u64(&memcg->memsw, RES_FAILCNT));
}

/*
 * This function returns the number of memcg under hierarchy tree. Returns
 * 1(self count) if no children.
 */
static int mem_cgroup_count_children(struct mem_cgroup *mem)
{
	int num = 0;
	struct mem_cgroup *iter;

	for_each_mem_cgroup_tree(iter, mem)
		num++;
	return num;
}

/*
 * Return the memory (and swap, if configured) limit for a memcg.
 */
u64 mem_cgroup_get_limit(struct mem_cgroup *memcg)
{
	u64 limit;
	u64 memsw;

	limit = res_counter_read_u64(&memcg->res, RES_LIMIT);
	limit += total_swap_pages << PAGE_SHIFT;

	memsw = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
	/*
	 * If memsw is finite and limits the amount of swap space available
	 * to this memcg, return that limit.
	 */
	return min(limit, memsw);
}
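/*
 * Worked example (illustrative) for mem_cgroup_get_limit() above: with a
 * memory limit of 1 GiB, 512 MiB of total swap and an unlimited memsw limit,
 * the function returns min(1 GiB + 512 MiB, RESOURCE_MAX) = 1.5 GiB. If the
 * memsw limit is instead set to 1.2 GiB, that tighter bound (1.2 GiB) is
 * returned, since no more than that can ever be charged to the group.
 */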
/*
 * Visit the first child (need not be the first child as per the ordering
 * of the cgroup list, since we track last_scanned_child) of @mem and use
 * that to reclaim free pages from.
 */
static struct mem_cgroup *
mem_cgroup_select_victim(struct mem_cgroup *root_mem)
{
	struct mem_cgroup *ret = NULL;
	struct cgroup_subsys_state *css;
	int nextid, found;

	if (!root_mem->use_hierarchy) {
		css_get(&root_mem->css);
		ret = root_mem;
	}

	while (!ret) {
		rcu_read_lock();
		nextid = root_mem->last_scanned_child + 1;
		css = css_get_next(&mem_cgroup_subsys, nextid, &root_mem->css,
				   &found);
		if (css && css_tryget(css))
			ret = container_of(css, struct mem_cgroup, css);

		rcu_read_unlock();
		/* Updates scanning parameter */
		if (!css) {
			/* this means start scan from ID:1 */
			root_mem->last_scanned_child = 0;
		} else
			root_mem->last_scanned_child = found;
	}

	return ret;
}

#if MAX_NUMNODES > 1

/*
 * Always updating the nodemask is not very good - even if we have an empty
 * list or the wrong list here, we can start from some node and traverse all
 * nodes based on the zonelist. So update the list loosely once per 10 secs.
 *
 */
static void mem_cgroup_may_update_nodemask(struct mem_cgroup *mem)
{
	int nid;

	if (time_after(mem->next_scan_node_update, jiffies))
		return;

	mem->next_scan_node_update = jiffies + 10*HZ;
	/* make a nodemask where this memcg uses memory from */
	mem->scan_nodes = node_states[N_HIGH_MEMORY];

	for_each_node_mask(nid, node_states[N_HIGH_MEMORY]) {

		if (mem_cgroup_get_zonestat_node(mem, nid, LRU_INACTIVE_FILE) ||
		    mem_cgroup_get_zonestat_node(mem, nid, LRU_ACTIVE_FILE))
			continue;

		if (total_swap_pages &&
		    (mem_cgroup_get_zonestat_node(mem, nid, LRU_INACTIVE_ANON) ||
		     mem_cgroup_get_zonestat_node(mem, nid, LRU_ACTIVE_ANON)))
			continue;
		node_clear(nid, mem->scan_nodes);
	}
}

/*
 * Selecting a node where we start reclaim from. Because what we need is just
 * reducing the usage counter, starting from anywhere is O.K. Considering
 * memory reclaim from the current node, there are pros. and cons.
 *
 * Freeing memory from current node means freeing memory from a node which
 * we'll use or we've used. So, it may make LRU bad. And if several threads
 * hit limits, it will see a contention on a node. But freeing from remote
 * node means more costs for memory reclaim because of memory latency.
 *
 * Now, we use round-robin. Better algorithm is welcomed.
 */
int mem_cgroup_select_victim_node(struct mem_cgroup *mem)
{
	int node;

	mem_cgroup_may_update_nodemask(mem);
	node = mem->last_scanned_node;

	node = next_node(node, mem->scan_nodes);
	if (node == MAX_NUMNODES)
		node = first_node(mem->scan_nodes);
	/*
	 * We call this when we hit limit, not when pages are added to LRU.
	 * No LRU may hold pages because all pages are UNEVICTABLE or
	 * memcg is too small and all pages are not on LRU. In that case,
	 * we use the current node.
	 */
	if (unlikely(node == MAX_NUMNODES))
		node = numa_node_id();

	mem->last_scanned_node = node;
	return node;
}

#else
int mem_cgroup_select_victim_node(struct mem_cgroup *mem)
{
	return 0;
}
#endif
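/*
 * Worked example (illustrative) for the node selection above: on a 4-node
 * machine where this memcg has file pages only on nodes 0 and 2 and no swap
 * is configured, mem_cgroup_may_update_nodemask() leaves scan_nodes = {0, 2},
 * and successive limit hits make mem_cgroup_select_victim_node() return
 * 0, 2, 0, 2, ... so reclaim is spread round-robin over the populated nodes.
 */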
/*
 * Scan the hierarchy if needed to reclaim memory. We remember the last child
 * we reclaimed from, so that we don't end up penalizing one child extensively
 * based on its position in the children list.
 *
 * root_mem is the original ancestor that we've been reclaiming from.
 *
 * We give up and return to the caller when we visit root_mem twice.
 * (other groups can be removed while we're walking....)
 *
 * If shrink==true, for avoiding to free too much, this returns immediately.
 */
static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
						struct zone *zone,
						gfp_t gfp_mask,
						unsigned long reclaim_options,
						unsigned long *total_scanned)
{
	struct mem_cgroup *victim;
	int ret, total = 0;
	int loop = 0;
	bool noswap = reclaim_options & MEM_CGROUP_RECLAIM_NOSWAP;
	bool shrink = reclaim_options & MEM_CGROUP_RECLAIM_SHRINK;
	bool check_soft = reclaim_options & MEM_CGROUP_RECLAIM_SOFT;
	unsigned long excess;
	unsigned long nr_scanned;

	excess = res_counter_soft_limit_excess(&root_mem->res) >> PAGE_SHIFT;

	/* If memsw_is_minimum==1, swap-out is of-no-use. */
	if (!check_soft && root_mem->memsw_is_minimum)
		noswap = true;

	while (1) {
		victim = mem_cgroup_select_victim(root_mem);
		if (victim == root_mem) {
			loop++;
			/*
			 * We are not draining per cpu cached charges during
			 * soft limit reclaim because global reclaim doesn't
			 * care about charges. It tries to free some memory and
			 * charges will not give any.
			 */
			if (!check_soft && loop >= 1)
				drain_all_stock_async(root_mem);
			if (loop >= 2) {
				/*
				 * If we have not been able to reclaim
				 * anything, it might be because there are
				 * no reclaimable pages under this hierarchy.
				 */
				if (!check_soft || !total) {
					css_put(&victim->css);
					break;
				}
				/*
				 * We want to do more targeted reclaim.
				 * excess >> 2 is not too excessive, so we do
				 * not reclaim too much, nor too little so
				 * that we keep coming back to reclaim from
				 * this cgroup.
				 */
				if (total >= (excess >> 2) ||
					(loop > MEM_CGROUP_MAX_RECLAIM_LOOPS)) {
					css_put(&victim->css);
					break;
				}
			}
		}
		if (!mem_cgroup_local_usage(victim)) {
			/* this cgroup's local usage == 0 */
			css_put(&victim->css);
			continue;
		}
		/* we use swappiness of local cgroup */
		if (check_soft) {
			ret = mem_cgroup_shrink_node_zone(victim, gfp_mask,
				noswap, get_swappiness(victim), zone,
				&nr_scanned);
			*total_scanned += nr_scanned;
		} else
			ret = try_to_free_mem_cgroup_pages(victim, gfp_mask,
						noswap, get_swappiness(victim));
		css_put(&victim->css);
		/*
		 * At shrinking usage, we can't check whether we should stop
		 * here or reclaim more. It depends on the callers.
		 * last_scanned_child will work well enough for keeping
		 * fairness under the tree.
		 */
		if (shrink)
			return ret;
		total += ret;
		if (check_soft) {
			if (!res_counter_soft_limit_excess(&root_mem->res))
				return total;
		} else if (mem_cgroup_margin(root_mem))
			return total;
	}
	return total;
}

/*
 * Check OOM-Killer is already running under our hierarchy.
 * If someone is running, return false.
1739 */ 1740 static bool mem_cgroup_oom_lock(struct mem_cgroup *mem) 1741 { 1742 int x, lock_count = 0; 1743 struct mem_cgroup *iter; 1744 1745 for_each_mem_cgroup_tree(iter, mem) { 1746 x = atomic_inc_return(&iter->oom_lock); 1747 lock_count = max(x, lock_count); 1748 } 1749 1750 if (lock_count == 1) 1751 return true; 1752 return false; 1753 } 1754 1755 static int mem_cgroup_oom_unlock(struct mem_cgroup *mem) 1756 { 1757 struct mem_cgroup *iter; 1758 1759 /* 1760 * When a new child is created while the hierarchy is under oom, 1761 * mem_cgroup_oom_lock() may not be called. We have to use 1762 * atomic_add_unless() here. 1763 */ 1764 for_each_mem_cgroup_tree(iter, mem) 1765 atomic_add_unless(&iter->oom_lock, -1, 0); 1766 return 0; 1767 } 1768 1769 1770 static DEFINE_MUTEX(memcg_oom_mutex); 1771 static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq); 1772 1773 struct oom_wait_info { 1774 struct mem_cgroup *mem; 1775 wait_queue_t wait; 1776 }; 1777 1778 static int memcg_oom_wake_function(wait_queue_t *wait, 1779 unsigned mode, int sync, void *arg) 1780 { 1781 struct mem_cgroup *wake_mem = (struct mem_cgroup *)arg; 1782 struct oom_wait_info *oom_wait_info; 1783 1784 oom_wait_info = container_of(wait, struct oom_wait_info, wait); 1785 1786 if (oom_wait_info->mem == wake_mem) 1787 goto wakeup; 1788 /* if no hierarchy, no match */ 1789 if (!oom_wait_info->mem->use_hierarchy || !wake_mem->use_hierarchy) 1790 return 0; 1791 /* 1792 * Both of oom_wait_info->mem and wake_mem are stable under us. 1793 * Then we can use css_is_ancestor without taking care of RCU. 1794 */ 1795 if (!css_is_ancestor(&oom_wait_info->mem->css, &wake_mem->css) && 1796 !css_is_ancestor(&wake_mem->css, &oom_wait_info->mem->css)) 1797 return 0; 1798 1799 wakeup: 1800 return autoremove_wake_function(wait, mode, sync, arg); 1801 } 1802 1803 static void memcg_wakeup_oom(struct mem_cgroup *mem) 1804 { 1805 /* for filtering, pass "mem" as argument. */ 1806 __wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, mem); 1807 } 1808 1809 static void memcg_oom_recover(struct mem_cgroup *mem) 1810 { 1811 if (mem && atomic_read(&mem->oom_lock)) 1812 memcg_wakeup_oom(mem); 1813 } 1814 1815 /* 1816 * try to call OOM killer. returns false if we should exit memory-reclaim loop. 1817 */ 1818 bool mem_cgroup_handle_oom(struct mem_cgroup *mem, gfp_t mask) 1819 { 1820 struct oom_wait_info owait; 1821 bool locked, need_to_kill; 1822 1823 owait.mem = mem; 1824 owait.wait.flags = 0; 1825 owait.wait.func = memcg_oom_wake_function; 1826 owait.wait.private = current; 1827 INIT_LIST_HEAD(&owait.wait.task_list); 1828 need_to_kill = true; 1829 /* At first, try to OOM lock hierarchy under mem.*/ 1830 mutex_lock(&memcg_oom_mutex); 1831 locked = mem_cgroup_oom_lock(mem); 1832 /* 1833 * Even if signal_pending(), we can't quit charge() loop without 1834 * accounting. So, UNINTERRUPTIBLE is appropriate. But SIGKILL 1835 * under OOM is always welcomed, use TASK_KILLABLE here. 
	 */
	prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
	if (!locked || mem->oom_kill_disable)
		need_to_kill = false;
	if (locked)
		mem_cgroup_oom_notify(mem);
	mutex_unlock(&memcg_oom_mutex);

	if (need_to_kill) {
		finish_wait(&memcg_oom_waitq, &owait.wait);
		mem_cgroup_out_of_memory(mem, mask);
	} else {
		schedule();
		finish_wait(&memcg_oom_waitq, &owait.wait);
	}
	mutex_lock(&memcg_oom_mutex);
	mem_cgroup_oom_unlock(mem);
	memcg_wakeup_oom(mem);
	mutex_unlock(&memcg_oom_mutex);

	if (test_thread_flag(TIF_MEMDIE) || fatal_signal_pending(current))
		return false;
	/* Give chance to dying process */
	schedule_timeout(1);
	return true;
}

/*
 * Currently used to update mapped file statistics, but the routine can be
 * generalized to update other statistics as well.
 *
 * Notes: Race condition
 *
 * We usually use page_cgroup_lock() for accessing page_cgroup member but
 * it tends to be costly. But considering some conditions, we don't need
 * to do so _always_.
 *
 * Considering "charge", lock_page_cgroup() is not required because all
 * file-stat operations happen after a page is attached to radix-tree. There
 * is no race with "charge".
 *
 * Considering "uncharge", we know that memcg doesn't clear pc->mem_cgroup
 * at "uncharge" intentionally. So, we always see valid pc->mem_cgroup even
 * if there is a race with "uncharge". Statistics itself is properly handled
 * by flags.
 *
 * Considering "move", this is the only case where we see a race. To keep the
 * race window small, we check the MEM_CGROUP_ON_MOVE percpu value and detect
 * whether there is a possibility of a race. If there is, we take a lock.
 */

void mem_cgroup_update_page_stat(struct page *page,
				 enum mem_cgroup_page_stat_item idx, int val)
{
	struct mem_cgroup *mem;
	struct page_cgroup *pc = lookup_page_cgroup(page);
	bool need_unlock = false;
	unsigned long uninitialized_var(flags);

	if (unlikely(!pc))
		return;

	rcu_read_lock();
	mem = pc->mem_cgroup;
	if (unlikely(!mem || !PageCgroupUsed(pc)))
		goto out;
	/* pc->mem_cgroup is unstable ? */
	if (unlikely(mem_cgroup_stealed(mem)) || PageTransHuge(page)) {
		/* take a lock against to access pc->mem_cgroup */
		move_lock_page_cgroup(pc, &flags);
		need_unlock = true;
		mem = pc->mem_cgroup;
		if (!mem || !PageCgroupUsed(pc))
			goto out;
	}

	switch (idx) {
	case MEMCG_NR_FILE_MAPPED:
		if (val > 0)
			SetPageCgroupFileMapped(pc);
		else if (!page_mapped(page))
			ClearPageCgroupFileMapped(pc);
		idx = MEM_CGROUP_STAT_FILE_MAPPED;
		break;
	default:
		BUG();
	}

	this_cpu_add(mem->stat->count[idx], val);

out:
	if (unlikely(need_unlock))
		move_unlock_page_cgroup(pc, &flags);
	rcu_read_unlock();
	return;
}
EXPORT_SYMBOL(mem_cgroup_update_page_stat);
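/*
 * Example (illustrative only, not used in this file): the expected shape of a
 * caller. When rmap adds or removes a file pte mapping, it updates the
 * FILE_MAPPED statistic of the owning memcg; +1 on map, -1 on unmap.
 */
static inline void mem_cgroup_account_file_mapping(struct page *page,
						   bool mapped)
{
	mem_cgroup_update_page_stat(page, MEMCG_NR_FILE_MAPPED,
				    mapped ? 1 : -1);
}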
1937 */ 1938 #define CHARGE_BATCH 32U 1939 struct memcg_stock_pcp { 1940 struct mem_cgroup *cached; /* this never be root cgroup */ 1941 unsigned int nr_pages; 1942 struct work_struct work; 1943 unsigned long flags; 1944 #define FLUSHING_CACHED_CHARGE (0) 1945 }; 1946 static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock); 1947 static DEFINE_MUTEX(percpu_charge_mutex); 1948 1949 /* 1950 * Try to consume stocked charge on this cpu. If success, one page is consumed 1951 * from local stock and true is returned. If the stock is 0 or charges from a 1952 * cgroup which is not current target, returns false. This stock will be 1953 * refilled. 1954 */ 1955 static bool consume_stock(struct mem_cgroup *mem) 1956 { 1957 struct memcg_stock_pcp *stock; 1958 bool ret = true; 1959 1960 stock = &get_cpu_var(memcg_stock); 1961 if (mem == stock->cached && stock->nr_pages) 1962 stock->nr_pages--; 1963 else /* need to call res_counter_charge */ 1964 ret = false; 1965 put_cpu_var(memcg_stock); 1966 return ret; 1967 } 1968 1969 /* 1970 * Returns stocks cached in percpu to res_counter and reset cached information. 1971 */ 1972 static void drain_stock(struct memcg_stock_pcp *stock) 1973 { 1974 struct mem_cgroup *old = stock->cached; 1975 1976 if (stock->nr_pages) { 1977 unsigned long bytes = stock->nr_pages * PAGE_SIZE; 1978 1979 res_counter_uncharge(&old->res, bytes); 1980 if (do_swap_account) 1981 res_counter_uncharge(&old->memsw, bytes); 1982 stock->nr_pages = 0; 1983 } 1984 stock->cached = NULL; 1985 } 1986 1987 /* 1988 * This must be called under preempt disabled or must be called by 1989 * a thread which is pinned to local cpu. 1990 */ 1991 static void drain_local_stock(struct work_struct *dummy) 1992 { 1993 struct memcg_stock_pcp *stock = &__get_cpu_var(memcg_stock); 1994 drain_stock(stock); 1995 clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags); 1996 } 1997 1998 /* 1999 * Cache charges(val) which is from res_counter, to local per_cpu area. 2000 * This will be consumed by consume_stock() function, later. 2001 */ 2002 static void refill_stock(struct mem_cgroup *mem, unsigned int nr_pages) 2003 { 2004 struct memcg_stock_pcp *stock = &get_cpu_var(memcg_stock); 2005 2006 if (stock->cached != mem) { /* reset if necessary */ 2007 drain_stock(stock); 2008 stock->cached = mem; 2009 } 2010 stock->nr_pages += nr_pages; 2011 put_cpu_var(memcg_stock); 2012 } 2013 2014 /* 2015 * Tries to drain stocked charges in other cpus. This function is asynchronous 2016 * and just put a work per cpu for draining localy on each cpu. Caller can 2017 * expects some charges will be back to res_counter later but cannot wait for 2018 * it. 2019 */ 2020 static void drain_all_stock_async(struct mem_cgroup *root_mem) 2021 { 2022 int cpu, curcpu; 2023 /* 2024 * If someone calls draining, avoid adding more kworker runs. 2025 */ 2026 if (!mutex_trylock(&percpu_charge_mutex)) 2027 return; 2028 /* Notify other cpus that system-wide "drain" is running */ 2029 get_online_cpus(); 2030 /* 2031 * Get a hint for avoiding draining charges on the current cpu, 2032 * which must be exhausted by our charging. It is not required that 2033 * this be a precise check, so we use raw_smp_processor_id() instead of 2034 * getcpu()/putcpu(). 
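 *
 * Being imprecise is harmless here: if we migrate to another cpu right
 * after reading raw_smp_processor_id() below, the worst outcomes are
 * that we skip draining a cpu whose stock we just emptied ourselves
 * anyway, or that we queue one extra drain_local_stock() run on the
 * cpu we are now on.  Neither affects correctness, only how quickly
 * the cached charges trickle back to the res_counter.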
2035 */ 2036 curcpu = raw_smp_processor_id(); 2037 for_each_online_cpu(cpu) { 2038 struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu); 2039 struct mem_cgroup *mem; 2040 2041 if (cpu == curcpu) 2042 continue; 2043 2044 mem = stock->cached; 2045 if (!mem) 2046 continue; 2047 if (mem != root_mem) { 2048 if (!root_mem->use_hierarchy) 2049 continue; 2050 /* check whether "mem" is under tree of "root_mem" */ 2051 if (!css_is_ancestor(&mem->css, &root_mem->css)) 2052 continue; 2053 } 2054 if (!test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) 2055 schedule_work_on(cpu, &stock->work); 2056 } 2057 put_online_cpus(); 2058 mutex_unlock(&percpu_charge_mutex); 2059 /* We don't wait for flush_work */ 2060 } 2061 2062 /* This is a synchronous drain interface. */ 2063 static void drain_all_stock_sync(void) 2064 { 2065 /* called when force_empty is called */ 2066 mutex_lock(&percpu_charge_mutex); 2067 schedule_on_each_cpu(drain_local_stock); 2068 mutex_unlock(&percpu_charge_mutex); 2069 } 2070 2071 /* 2072 * This function drains percpu counter value from DEAD cpu and 2073 * move it to local cpu. Note that this function can be preempted. 2074 */ 2075 static void mem_cgroup_drain_pcp_counter(struct mem_cgroup *mem, int cpu) 2076 { 2077 int i; 2078 2079 spin_lock(&mem->pcp_counter_lock); 2080 for (i = 0; i < MEM_CGROUP_STAT_DATA; i++) { 2081 long x = per_cpu(mem->stat->count[i], cpu); 2082 2083 per_cpu(mem->stat->count[i], cpu) = 0; 2084 mem->nocpu_base.count[i] += x; 2085 } 2086 for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) { 2087 unsigned long x = per_cpu(mem->stat->events[i], cpu); 2088 2089 per_cpu(mem->stat->events[i], cpu) = 0; 2090 mem->nocpu_base.events[i] += x; 2091 } 2092 /* need to clear ON_MOVE value, works as a kind of lock. */ 2093 per_cpu(mem->stat->count[MEM_CGROUP_ON_MOVE], cpu) = 0; 2094 spin_unlock(&mem->pcp_counter_lock); 2095 } 2096 2097 static void synchronize_mem_cgroup_on_move(struct mem_cgroup *mem, int cpu) 2098 { 2099 int idx = MEM_CGROUP_ON_MOVE; 2100 2101 spin_lock(&mem->pcp_counter_lock); 2102 per_cpu(mem->stat->count[idx], cpu) = mem->nocpu_base.count[idx]; 2103 spin_unlock(&mem->pcp_counter_lock); 2104 } 2105 2106 static int __cpuinit memcg_cpu_hotplug_callback(struct notifier_block *nb, 2107 unsigned long action, 2108 void *hcpu) 2109 { 2110 int cpu = (unsigned long)hcpu; 2111 struct memcg_stock_pcp *stock; 2112 struct mem_cgroup *iter; 2113 2114 if ((action == CPU_ONLINE)) { 2115 for_each_mem_cgroup_all(iter) 2116 synchronize_mem_cgroup_on_move(iter, cpu); 2117 return NOTIFY_OK; 2118 } 2119 2120 if ((action != CPU_DEAD) || action != CPU_DEAD_FROZEN) 2121 return NOTIFY_OK; 2122 2123 for_each_mem_cgroup_all(iter) 2124 mem_cgroup_drain_pcp_counter(iter, cpu); 2125 2126 stock = &per_cpu(memcg_stock, cpu); 2127 drain_stock(stock); 2128 return NOTIFY_OK; 2129 } 2130 2131 2132 /* See __mem_cgroup_try_charge() for details */ 2133 enum { 2134 CHARGE_OK, /* success */ 2135 CHARGE_RETRY, /* need to retry but retry is not bad */ 2136 CHARGE_NOMEM, /* we can't do more. return -ENOMEM */ 2137 CHARGE_WOULDBLOCK, /* GFP_WAIT wasn't set and no enough res. 
*/ 2138 CHARGE_OOM_DIE, /* the current is killed because of OOM */ 2139 }; 2140 2141 static int mem_cgroup_do_charge(struct mem_cgroup *mem, gfp_t gfp_mask, 2142 unsigned int nr_pages, bool oom_check) 2143 { 2144 unsigned long csize = nr_pages * PAGE_SIZE; 2145 struct mem_cgroup *mem_over_limit; 2146 struct res_counter *fail_res; 2147 unsigned long flags = 0; 2148 int ret; 2149 2150 ret = res_counter_charge(&mem->res, csize, &fail_res); 2151 2152 if (likely(!ret)) { 2153 if (!do_swap_account) 2154 return CHARGE_OK; 2155 ret = res_counter_charge(&mem->memsw, csize, &fail_res); 2156 if (likely(!ret)) 2157 return CHARGE_OK; 2158 2159 res_counter_uncharge(&mem->res, csize); 2160 mem_over_limit = mem_cgroup_from_res_counter(fail_res, memsw); 2161 flags |= MEM_CGROUP_RECLAIM_NOSWAP; 2162 } else 2163 mem_over_limit = mem_cgroup_from_res_counter(fail_res, res); 2164 /* 2165 * nr_pages can be either a huge page (HPAGE_PMD_NR), a batch 2166 * of regular pages (CHARGE_BATCH), or a single regular page (1). 2167 * 2168 * Never reclaim on behalf of optional batching, retry with a 2169 * single page instead. 2170 */ 2171 if (nr_pages == CHARGE_BATCH) 2172 return CHARGE_RETRY; 2173 2174 if (!(gfp_mask & __GFP_WAIT)) 2175 return CHARGE_WOULDBLOCK; 2176 2177 ret = mem_cgroup_hierarchical_reclaim(mem_over_limit, NULL, 2178 gfp_mask, flags, NULL); 2179 if (mem_cgroup_margin(mem_over_limit) >= nr_pages) 2180 return CHARGE_RETRY; 2181 /* 2182 * Even though the limit is exceeded at this point, reclaim 2183 * may have been able to free some pages. Retry the charge 2184 * before killing the task. 2185 * 2186 * Only for regular pages, though: huge pages are rather 2187 * unlikely to succeed so close to the limit, and we fall back 2188 * to regular pages anyway in case of failure. 2189 */ 2190 if (nr_pages == 1 && ret) 2191 return CHARGE_RETRY; 2192 2193 /* 2194 * At task move, charge accounts can be doubly counted. So, it's 2195 * better to wait until the end of task_move if something is going on. 2196 */ 2197 if (mem_cgroup_wait_acct_move(mem_over_limit)) 2198 return CHARGE_RETRY; 2199 2200 /* If we don't need to call oom-killer at el, return immediately */ 2201 if (!oom_check) 2202 return CHARGE_NOMEM; 2203 /* check OOM */ 2204 if (!mem_cgroup_handle_oom(mem_over_limit, gfp_mask)) 2205 return CHARGE_OOM_DIE; 2206 2207 return CHARGE_RETRY; 2208 } 2209 2210 /* 2211 * Unlike exported interface, "oom" parameter is added. if oom==true, 2212 * oom-killer can be invoked. 2213 */ 2214 static int __mem_cgroup_try_charge(struct mm_struct *mm, 2215 gfp_t gfp_mask, 2216 unsigned int nr_pages, 2217 struct mem_cgroup **memcg, 2218 bool oom) 2219 { 2220 unsigned int batch = max(CHARGE_BATCH, nr_pages); 2221 int nr_oom_retries = MEM_CGROUP_RECLAIM_RETRIES; 2222 struct mem_cgroup *mem = NULL; 2223 int ret; 2224 2225 /* 2226 * Unlike gloval-vm's OOM-kill, we're not in memory shortage 2227 * in system level. So, allow to go ahead dying process in addition to 2228 * MEMDIE process. 2229 */ 2230 if (unlikely(test_thread_flag(TIF_MEMDIE) 2231 || fatal_signal_pending(current))) 2232 goto bypass; 2233 2234 /* 2235 * We always charge the cgroup the mm_struct belongs to. 2236 * The mm_struct's mem_cgroup changes on task migration if the 2237 * thread group leader migrates. It's possible that mm is not 2238 * set, if so charge the init_mm (happens for pagecache usage). 
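 *
 * For reference, the five outcomes of mem_cgroup_do_charge() above and
 * what the retry loop further down does with them (a summary of the
 * switch statement, not new behaviour):
 *
 *	CHARGE_OK	  - charge succeeded; any batching surplus is
 *			    parked in the per-cpu stock afterwards
 *	CHARGE_RETRY	  - retry, but charge only nr_pages instead of
 *			    the optional batch
 *	CHARGE_WOULDBLOCK - the caller may not sleep (!__GFP_WAIT);
 *			    fail with -ENOMEM
 *	CHARGE_NOMEM	  - reclaim did not help; fail unless oom == true,
 *			    in which case we keep retrying and may end up
 *			    in mem_cgroup_handle_oom()
 *	CHARGE_OOM_DIE	  - the current task was picked by the OOM
 *			    killer; bypass the charge entirely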
2239 */ 2240 if (!*memcg && !mm) 2241 goto bypass; 2242 again: 2243 if (*memcg) { /* css should be a valid one */ 2244 mem = *memcg; 2245 VM_BUG_ON(css_is_removed(&mem->css)); 2246 if (mem_cgroup_is_root(mem)) 2247 goto done; 2248 if (nr_pages == 1 && consume_stock(mem)) 2249 goto done; 2250 css_get(&mem->css); 2251 } else { 2252 struct task_struct *p; 2253 2254 rcu_read_lock(); 2255 p = rcu_dereference(mm->owner); 2256 /* 2257 * Because we don't have task_lock(), "p" can exit. 2258 * In that case, "mem" can point to root or p can be NULL with 2259 * race with swapoff. Then, we have small risk of mis-accouning. 2260 * But such kind of mis-account by race always happens because 2261 * we don't have cgroup_mutex(). It's overkill and we allo that 2262 * small race, here. 2263 * (*) swapoff at el will charge against mm-struct not against 2264 * task-struct. So, mm->owner can be NULL. 2265 */ 2266 mem = mem_cgroup_from_task(p); 2267 if (!mem || mem_cgroup_is_root(mem)) { 2268 rcu_read_unlock(); 2269 goto done; 2270 } 2271 if (nr_pages == 1 && consume_stock(mem)) { 2272 /* 2273 * It seems dagerous to access memcg without css_get(). 2274 * But considering how consume_stok works, it's not 2275 * necessary. If consume_stock success, some charges 2276 * from this memcg are cached on this cpu. So, we 2277 * don't need to call css_get()/css_tryget() before 2278 * calling consume_stock(). 2279 */ 2280 rcu_read_unlock(); 2281 goto done; 2282 } 2283 /* after here, we may be blocked. we need to get refcnt */ 2284 if (!css_tryget(&mem->css)) { 2285 rcu_read_unlock(); 2286 goto again; 2287 } 2288 rcu_read_unlock(); 2289 } 2290 2291 do { 2292 bool oom_check; 2293 2294 /* If killed, bypass charge */ 2295 if (fatal_signal_pending(current)) { 2296 css_put(&mem->css); 2297 goto bypass; 2298 } 2299 2300 oom_check = false; 2301 if (oom && !nr_oom_retries) { 2302 oom_check = true; 2303 nr_oom_retries = MEM_CGROUP_RECLAIM_RETRIES; 2304 } 2305 2306 ret = mem_cgroup_do_charge(mem, gfp_mask, batch, oom_check); 2307 switch (ret) { 2308 case CHARGE_OK: 2309 break; 2310 case CHARGE_RETRY: /* not in OOM situation but retry */ 2311 batch = nr_pages; 2312 css_put(&mem->css); 2313 mem = NULL; 2314 goto again; 2315 case CHARGE_WOULDBLOCK: /* !__GFP_WAIT */ 2316 css_put(&mem->css); 2317 goto nomem; 2318 case CHARGE_NOMEM: /* OOM routine works */ 2319 if (!oom) { 2320 css_put(&mem->css); 2321 goto nomem; 2322 } 2323 /* If oom, we never return -ENOMEM */ 2324 nr_oom_retries--; 2325 break; 2326 case CHARGE_OOM_DIE: /* Killed by OOM Killer */ 2327 css_put(&mem->css); 2328 goto bypass; 2329 } 2330 } while (ret != CHARGE_OK); 2331 2332 if (batch > nr_pages) 2333 refill_stock(mem, batch - nr_pages); 2334 css_put(&mem->css); 2335 done: 2336 *memcg = mem; 2337 return 0; 2338 nomem: 2339 *memcg = NULL; 2340 return -ENOMEM; 2341 bypass: 2342 *memcg = NULL; 2343 return 0; 2344 } 2345 2346 /* 2347 * Somemtimes we have to undo a charge we got by try_charge(). 2348 * This function is for that and do uncharge, put css's refcnt. 2349 * gotten by try_charge(). 2350 */ 2351 static void __mem_cgroup_cancel_charge(struct mem_cgroup *mem, 2352 unsigned int nr_pages) 2353 { 2354 if (!mem_cgroup_is_root(mem)) { 2355 unsigned long bytes = nr_pages * PAGE_SIZE; 2356 2357 res_counter_uncharge(&mem->res, bytes); 2358 if (do_swap_account) 2359 res_counter_uncharge(&mem->memsw, bytes); 2360 } 2361 } 2362 2363 /* 2364 * A helper function to get mem_cgroup from ID. must be called under 2365 * rcu_read_lock(). 
The caller must check css_is_removed() or some if 2366 * it's concern. (dropping refcnt from swap can be called against removed 2367 * memcg.) 2368 */ 2369 static struct mem_cgroup *mem_cgroup_lookup(unsigned short id) 2370 { 2371 struct cgroup_subsys_state *css; 2372 2373 /* ID 0 is unused ID */ 2374 if (!id) 2375 return NULL; 2376 css = css_lookup(&mem_cgroup_subsys, id); 2377 if (!css) 2378 return NULL; 2379 return container_of(css, struct mem_cgroup, css); 2380 } 2381 2382 struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page) 2383 { 2384 struct mem_cgroup *mem = NULL; 2385 struct page_cgroup *pc; 2386 unsigned short id; 2387 swp_entry_t ent; 2388 2389 VM_BUG_ON(!PageLocked(page)); 2390 2391 pc = lookup_page_cgroup(page); 2392 lock_page_cgroup(pc); 2393 if (PageCgroupUsed(pc)) { 2394 mem = pc->mem_cgroup; 2395 if (mem && !css_tryget(&mem->css)) 2396 mem = NULL; 2397 } else if (PageSwapCache(page)) { 2398 ent.val = page_private(page); 2399 id = lookup_swap_cgroup(ent); 2400 rcu_read_lock(); 2401 mem = mem_cgroup_lookup(id); 2402 if (mem && !css_tryget(&mem->css)) 2403 mem = NULL; 2404 rcu_read_unlock(); 2405 } 2406 unlock_page_cgroup(pc); 2407 return mem; 2408 } 2409 2410 static void __mem_cgroup_commit_charge(struct mem_cgroup *mem, 2411 struct page *page, 2412 unsigned int nr_pages, 2413 struct page_cgroup *pc, 2414 enum charge_type ctype) 2415 { 2416 lock_page_cgroup(pc); 2417 if (unlikely(PageCgroupUsed(pc))) { 2418 unlock_page_cgroup(pc); 2419 __mem_cgroup_cancel_charge(mem, nr_pages); 2420 return; 2421 } 2422 /* 2423 * we don't need page_cgroup_lock about tail pages, becase they are not 2424 * accessed by any other context at this point. 2425 */ 2426 pc->mem_cgroup = mem; 2427 /* 2428 * We access a page_cgroup asynchronously without lock_page_cgroup(). 2429 * Especially when a page_cgroup is taken from a page, pc->mem_cgroup 2430 * is accessed after testing USED bit. To make pc->mem_cgroup visible 2431 * before USED bit, we need memory barrier here. 2432 * See mem_cgroup_add_lru_list(), etc. 2433 */ 2434 smp_wmb(); 2435 switch (ctype) { 2436 case MEM_CGROUP_CHARGE_TYPE_CACHE: 2437 case MEM_CGROUP_CHARGE_TYPE_SHMEM: 2438 SetPageCgroupCache(pc); 2439 SetPageCgroupUsed(pc); 2440 break; 2441 case MEM_CGROUP_CHARGE_TYPE_MAPPED: 2442 ClearPageCgroupCache(pc); 2443 SetPageCgroupUsed(pc); 2444 break; 2445 default: 2446 break; 2447 } 2448 2449 mem_cgroup_charge_statistics(mem, PageCgroupCache(pc), nr_pages); 2450 unlock_page_cgroup(pc); 2451 /* 2452 * "charge_statistics" updated event counter. Then, check it. 2453 * Insert ancestor (and ancestor's ancestors), to softlimit RB-tree. 2454 * if they exceeds softlimit. 2455 */ 2456 memcg_check_events(mem, page); 2457 } 2458 2459 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 2460 2461 #define PCGF_NOCOPY_AT_SPLIT ((1 << PCG_LOCK) | (1 << PCG_MOVE_LOCK) |\ 2462 (1 << PCG_ACCT_LRU) | (1 << PCG_MIGRATION)) 2463 /* 2464 * Because tail pages are not marked as "used", set it. We're under 2465 * zone->lru_lock, 'splitting on pmd' and compund_lock. 2466 */ 2467 void mem_cgroup_split_huge_fixup(struct page *head, struct page *tail) 2468 { 2469 struct page_cgroup *head_pc = lookup_page_cgroup(head); 2470 struct page_cgroup *tail_pc = lookup_page_cgroup(tail); 2471 unsigned long flags; 2472 2473 if (mem_cgroup_disabled()) 2474 return; 2475 /* 2476 * We have no races with charge/uncharge but will have races with 2477 * page state accounting. 
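 *
 * The store-pc->mem_cgroup-then-set-USED publication done in
 * __mem_cgroup_commit_charge() above (and repeated for the tail page
 * just below) is what readers such as mem_cgroup_add_lru_list() rely
 * on.  As a sketch, the consuming side looks roughly like:
 *
 *	if (PageCgroupUsed(pc)) {
 *		smp_rmb();
 *		mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
 *		...
 *	}
 *
 * where the smp_rmb() pairs with the smp_wmb() on the writer side.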
2478 */ 2479 move_lock_page_cgroup(head_pc, &flags); 2480 2481 tail_pc->mem_cgroup = head_pc->mem_cgroup; 2482 smp_wmb(); /* see __commit_charge() */ 2483 if (PageCgroupAcctLRU(head_pc)) { 2484 enum lru_list lru; 2485 struct mem_cgroup_per_zone *mz; 2486 2487 /* 2488 * LRU flags cannot be copied because we need to add tail 2489 *.page to LRU by generic call and our hook will be called. 2490 * We hold lru_lock, then, reduce counter directly. 2491 */ 2492 lru = page_lru(head); 2493 mz = page_cgroup_zoneinfo(head_pc->mem_cgroup, head); 2494 MEM_CGROUP_ZSTAT(mz, lru) -= 1; 2495 } 2496 tail_pc->flags = head_pc->flags & ~PCGF_NOCOPY_AT_SPLIT; 2497 move_unlock_page_cgroup(head_pc, &flags); 2498 } 2499 #endif 2500 2501 /** 2502 * mem_cgroup_move_account - move account of the page 2503 * @page: the page 2504 * @nr_pages: number of regular pages (>1 for huge pages) 2505 * @pc: page_cgroup of the page. 2506 * @from: mem_cgroup which the page is moved from. 2507 * @to: mem_cgroup which the page is moved to. @from != @to. 2508 * @uncharge: whether we should call uncharge and css_put against @from. 2509 * 2510 * The caller must confirm following. 2511 * - page is not on LRU (isolate_page() is useful.) 2512 * - compound_lock is held when nr_pages > 1 2513 * 2514 * This function doesn't do "charge" nor css_get to new cgroup. It should be 2515 * done by a caller(__mem_cgroup_try_charge would be useful). If @uncharge is 2516 * true, this function does "uncharge" from old cgroup, but it doesn't if 2517 * @uncharge is false, so a caller should do "uncharge". 2518 */ 2519 static int mem_cgroup_move_account(struct page *page, 2520 unsigned int nr_pages, 2521 struct page_cgroup *pc, 2522 struct mem_cgroup *from, 2523 struct mem_cgroup *to, 2524 bool uncharge) 2525 { 2526 unsigned long flags; 2527 int ret; 2528 2529 VM_BUG_ON(from == to); 2530 VM_BUG_ON(PageLRU(page)); 2531 /* 2532 * The page is isolated from LRU. So, collapse function 2533 * will not handle this page. But page splitting can happen. 2534 * Do this check under compound_page_lock(). The caller should 2535 * hold it. 2536 */ 2537 ret = -EBUSY; 2538 if (nr_pages > 1 && !PageTransHuge(page)) 2539 goto out; 2540 2541 lock_page_cgroup(pc); 2542 2543 ret = -EINVAL; 2544 if (!PageCgroupUsed(pc) || pc->mem_cgroup != from) 2545 goto unlock; 2546 2547 move_lock_page_cgroup(pc, &flags); 2548 2549 if (PageCgroupFileMapped(pc)) { 2550 /* Update mapped_file data for mem_cgroup */ 2551 preempt_disable(); 2552 __this_cpu_dec(from->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]); 2553 __this_cpu_inc(to->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]); 2554 preempt_enable(); 2555 } 2556 mem_cgroup_charge_statistics(from, PageCgroupCache(pc), -nr_pages); 2557 if (uncharge) 2558 /* This is not "cancel", but cancel_charge does all we need. */ 2559 __mem_cgroup_cancel_charge(from, nr_pages); 2560 2561 /* caller should have done css_get */ 2562 pc->mem_cgroup = to; 2563 mem_cgroup_charge_statistics(to, PageCgroupCache(pc), nr_pages); 2564 /* 2565 * We charges against "to" which may not have any tasks. Then, "to" 2566 * can be under rmdir(). But in current implementation, caller of 2567 * this function is just force_empty() and move charge, so it's 2568 * guaranteed that "to" is never removed. So, we don't check rmdir 2569 * status here. 
2570 */ 2571 move_unlock_page_cgroup(pc, &flags); 2572 ret = 0; 2573 unlock: 2574 unlock_page_cgroup(pc); 2575 /* 2576 * check events 2577 */ 2578 memcg_check_events(to, page); 2579 memcg_check_events(from, page); 2580 out: 2581 return ret; 2582 } 2583 2584 /* 2585 * move charges to its parent. 2586 */ 2587 2588 static int mem_cgroup_move_parent(struct page *page, 2589 struct page_cgroup *pc, 2590 struct mem_cgroup *child, 2591 gfp_t gfp_mask) 2592 { 2593 struct cgroup *cg = child->css.cgroup; 2594 struct cgroup *pcg = cg->parent; 2595 struct mem_cgroup *parent; 2596 unsigned int nr_pages; 2597 unsigned long uninitialized_var(flags); 2598 int ret; 2599 2600 /* Is ROOT ? */ 2601 if (!pcg) 2602 return -EINVAL; 2603 2604 ret = -EBUSY; 2605 if (!get_page_unless_zero(page)) 2606 goto out; 2607 if (isolate_lru_page(page)) 2608 goto put; 2609 2610 nr_pages = hpage_nr_pages(page); 2611 2612 parent = mem_cgroup_from_cont(pcg); 2613 ret = __mem_cgroup_try_charge(NULL, gfp_mask, nr_pages, &parent, false); 2614 if (ret || !parent) 2615 goto put_back; 2616 2617 if (nr_pages > 1) 2618 flags = compound_lock_irqsave(page); 2619 2620 ret = mem_cgroup_move_account(page, nr_pages, pc, child, parent, true); 2621 if (ret) 2622 __mem_cgroup_cancel_charge(parent, nr_pages); 2623 2624 if (nr_pages > 1) 2625 compound_unlock_irqrestore(page, flags); 2626 put_back: 2627 putback_lru_page(page); 2628 put: 2629 put_page(page); 2630 out: 2631 return ret; 2632 } 2633 2634 /* 2635 * Charge the memory controller for page usage. 2636 * Return 2637 * 0 if the charge was successful 2638 * < 0 if the cgroup is over its limit 2639 */ 2640 static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm, 2641 gfp_t gfp_mask, enum charge_type ctype) 2642 { 2643 struct mem_cgroup *mem = NULL; 2644 unsigned int nr_pages = 1; 2645 struct page_cgroup *pc; 2646 bool oom = true; 2647 int ret; 2648 2649 if (PageTransHuge(page)) { 2650 nr_pages <<= compound_order(page); 2651 VM_BUG_ON(!PageTransHuge(page)); 2652 /* 2653 * Never OOM-kill a process for a huge page. The 2654 * fault handler will fall back to regular pages. 2655 */ 2656 oom = false; 2657 } 2658 2659 pc = lookup_page_cgroup(page); 2660 BUG_ON(!pc); /* XXX: remove this and move pc lookup into commit */ 2661 2662 ret = __mem_cgroup_try_charge(mm, gfp_mask, nr_pages, &mem, oom); 2663 if (ret || !mem) 2664 return ret; 2665 2666 __mem_cgroup_commit_charge(mem, page, nr_pages, pc, ctype); 2667 return 0; 2668 } 2669 2670 int mem_cgroup_newpage_charge(struct page *page, 2671 struct mm_struct *mm, gfp_t gfp_mask) 2672 { 2673 if (mem_cgroup_disabled()) 2674 return 0; 2675 /* 2676 * If already mapped, we don't have to account. 2677 * If page cache, page->mapping has address_space. 2678 * But page->mapping may have out-of-use anon_vma pointer, 2679 * detecit it by PageAnon() check. newly-mapped-anon's page->mapping 2680 * is NULL. 
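 *
 * Sketch of a typical caller, modelled on the anonymous-fault path in
 * memory.c (the surrounding calls are paraphrased and not part of this
 * file):
 *
 *	page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
 *	if (!page)
 *		return VM_FAULT_OOM;
 *	if (mem_cgroup_newpage_charge(page, mm, GFP_KERNEL)) {
 *		page_cache_release(page);
 *		return VM_FAULT_OOM;
 *	}
 *	... set up the pte, page_add_new_anon_rmap(), etc ...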
2681 */ 2682 if (page_mapped(page) || (page->mapping && !PageAnon(page))) 2683 return 0; 2684 if (unlikely(!mm)) 2685 mm = &init_mm; 2686 return mem_cgroup_charge_common(page, mm, gfp_mask, 2687 MEM_CGROUP_CHARGE_TYPE_MAPPED); 2688 } 2689 2690 static void 2691 __mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr, 2692 enum charge_type ctype); 2693 2694 static void 2695 __mem_cgroup_commit_charge_lrucare(struct page *page, struct mem_cgroup *mem, 2696 enum charge_type ctype) 2697 { 2698 struct page_cgroup *pc = lookup_page_cgroup(page); 2699 /* 2700 * In some case, SwapCache, FUSE(splice_buf->radixtree), the page 2701 * is already on LRU. It means the page may on some other page_cgroup's 2702 * LRU. Take care of it. 2703 */ 2704 mem_cgroup_lru_del_before_commit(page); 2705 __mem_cgroup_commit_charge(mem, page, 1, pc, ctype); 2706 mem_cgroup_lru_add_after_commit(page); 2707 return; 2708 } 2709 2710 int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm, 2711 gfp_t gfp_mask) 2712 { 2713 struct mem_cgroup *mem = NULL; 2714 int ret; 2715 2716 if (mem_cgroup_disabled()) 2717 return 0; 2718 if (PageCompound(page)) 2719 return 0; 2720 /* 2721 * Corner case handling. This is called from add_to_page_cache() 2722 * in usual. But some FS (shmem) precharges this page before calling it 2723 * and call add_to_page_cache() with GFP_NOWAIT. 2724 * 2725 * For GFP_NOWAIT case, the page may be pre-charged before calling 2726 * add_to_page_cache(). (See shmem.c) check it here and avoid to call 2727 * charge twice. (It works but has to pay a bit larger cost.) 2728 * And when the page is SwapCache, it should take swap information 2729 * into account. This is under lock_page() now. 2730 */ 2731 if (!(gfp_mask & __GFP_WAIT)) { 2732 struct page_cgroup *pc; 2733 2734 pc = lookup_page_cgroup(page); 2735 if (!pc) 2736 return 0; 2737 lock_page_cgroup(pc); 2738 if (PageCgroupUsed(pc)) { 2739 unlock_page_cgroup(pc); 2740 return 0; 2741 } 2742 unlock_page_cgroup(pc); 2743 } 2744 2745 if (unlikely(!mm)) 2746 mm = &init_mm; 2747 2748 if (page_is_file_cache(page)) { 2749 ret = __mem_cgroup_try_charge(mm, gfp_mask, 1, &mem, true); 2750 if (ret || !mem) 2751 return ret; 2752 2753 /* 2754 * FUSE reuses pages without going through the final 2755 * put that would remove them from the LRU list, make 2756 * sure that they get relinked properly. 2757 */ 2758 __mem_cgroup_commit_charge_lrucare(page, mem, 2759 MEM_CGROUP_CHARGE_TYPE_CACHE); 2760 return ret; 2761 } 2762 /* shmem */ 2763 if (PageSwapCache(page)) { 2764 ret = mem_cgroup_try_charge_swapin(mm, page, gfp_mask, &mem); 2765 if (!ret) 2766 __mem_cgroup_commit_charge_swapin(page, mem, 2767 MEM_CGROUP_CHARGE_TYPE_SHMEM); 2768 } else 2769 ret = mem_cgroup_charge_common(page, mm, gfp_mask, 2770 MEM_CGROUP_CHARGE_TYPE_SHMEM); 2771 2772 return ret; 2773 } 2774 2775 /* 2776 * While swap-in, try_charge -> commit or cancel, the page is locked. 2777 * And when try_charge() successfully returns, one refcnt to memcg without 2778 * struct page_cgroup is acquired. 
This refcnt will be consumed by 2779 * "commit()" or removed by "cancel()" 2780 */ 2781 int mem_cgroup_try_charge_swapin(struct mm_struct *mm, 2782 struct page *page, 2783 gfp_t mask, struct mem_cgroup **ptr) 2784 { 2785 struct mem_cgroup *mem; 2786 int ret; 2787 2788 *ptr = NULL; 2789 2790 if (mem_cgroup_disabled()) 2791 return 0; 2792 2793 if (!do_swap_account) 2794 goto charge_cur_mm; 2795 /* 2796 * A racing thread's fault, or swapoff, may have already updated 2797 * the pte, and even removed page from swap cache: in those cases 2798 * do_swap_page()'s pte_same() test will fail; but there's also a 2799 * KSM case which does need to charge the page. 2800 */ 2801 if (!PageSwapCache(page)) 2802 goto charge_cur_mm; 2803 mem = try_get_mem_cgroup_from_page(page); 2804 if (!mem) 2805 goto charge_cur_mm; 2806 *ptr = mem; 2807 ret = __mem_cgroup_try_charge(NULL, mask, 1, ptr, true); 2808 css_put(&mem->css); 2809 return ret; 2810 charge_cur_mm: 2811 if (unlikely(!mm)) 2812 mm = &init_mm; 2813 return __mem_cgroup_try_charge(mm, mask, 1, ptr, true); 2814 } 2815 2816 static void 2817 __mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr, 2818 enum charge_type ctype) 2819 { 2820 if (mem_cgroup_disabled()) 2821 return; 2822 if (!ptr) 2823 return; 2824 cgroup_exclude_rmdir(&ptr->css); 2825 2826 __mem_cgroup_commit_charge_lrucare(page, ptr, ctype); 2827 /* 2828 * Now swap is on-memory. This means this page may be 2829 * counted both as mem and swap....double count. 2830 * Fix it by uncharging from memsw. Basically, this SwapCache is stable 2831 * under lock_page(). But in do_swap_page()::memory.c, reuse_swap_page() 2832 * may call delete_from_swap_cache() before reach here. 2833 */ 2834 if (do_swap_account && PageSwapCache(page)) { 2835 swp_entry_t ent = {.val = page_private(page)}; 2836 unsigned short id; 2837 struct mem_cgroup *memcg; 2838 2839 id = swap_cgroup_record(ent, 0); 2840 rcu_read_lock(); 2841 memcg = mem_cgroup_lookup(id); 2842 if (memcg) { 2843 /* 2844 * This recorded memcg can be obsolete one. So, avoid 2845 * calling css_tryget 2846 */ 2847 if (!mem_cgroup_is_root(memcg)) 2848 res_counter_uncharge(&memcg->memsw, PAGE_SIZE); 2849 mem_cgroup_swap_statistics(memcg, false); 2850 mem_cgroup_put(memcg); 2851 } 2852 rcu_read_unlock(); 2853 } 2854 /* 2855 * At swapin, we may charge account against cgroup which has no tasks. 2856 * So, rmdir()->pre_destroy() can be called while we do this charge. 2857 * In that case, we need to call pre_destroy() again. check it here. 2858 */ 2859 cgroup_release_and_wakeup_rmdir(&ptr->css); 2860 } 2861 2862 void mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr) 2863 { 2864 __mem_cgroup_commit_charge_swapin(page, ptr, 2865 MEM_CGROUP_CHARGE_TYPE_MAPPED); 2866 } 2867 2868 void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *mem) 2869 { 2870 if (mem_cgroup_disabled()) 2871 return; 2872 if (!mem) 2873 return; 2874 __mem_cgroup_cancel_charge(mem, 1); 2875 } 2876 2877 static void mem_cgroup_do_uncharge(struct mem_cgroup *mem, 2878 unsigned int nr_pages, 2879 const enum charge_type ctype) 2880 { 2881 struct memcg_batch_info *batch = NULL; 2882 bool uncharge_memsw = true; 2883 2884 /* If swapout, usage of swap doesn't decrease */ 2885 if (!do_swap_account || ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT) 2886 uncharge_memsw = false; 2887 2888 batch = ¤t->memcg_batch; 2889 /* 2890 * In usual, we do css_get() when we remember memcg pointer. 2891 * But in this case, we keep res->usage until end of a series of 2892 * uncharges. 
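 *
 * As a sketch, the expected bracket on the freeing side looks like the
 * following (pages_to_free is a made-up list; the real callers are the
 * unmap/truncate/invalidate paths):
 *
 *	mem_cgroup_uncharge_start();
 *	list_for_each_entry_safe(page, next, &pages_to_free, lru)
 *		mem_cgroup_uncharge_cache_page(page);
 *	mem_cgroup_uncharge_end();
 *
 * Each call in the loop only records the charge in current->memcg_batch;
 * the single res_counter_uncharge() happens in mem_cgroup_uncharge_end(),
 * so res->usage stays elevated for the whole bracket.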
Then, it's ok to ignore memcg's refcnt. 2893 */ 2894 if (!batch->memcg) 2895 batch->memcg = mem; 2896 /* 2897 * do_batch > 0 when unmapping pages or inode invalidate/truncate. 2898 * In those cases, all pages freed continuously can be expected to be in 2899 * the same cgroup and we have chance to coalesce uncharges. 2900 * But we do uncharge one by one if this is killed by OOM(TIF_MEMDIE) 2901 * because we want to do uncharge as soon as possible. 2902 */ 2903 2904 if (!batch->do_batch || test_thread_flag(TIF_MEMDIE)) 2905 goto direct_uncharge; 2906 2907 if (nr_pages > 1) 2908 goto direct_uncharge; 2909 2910 /* 2911 * In typical case, batch->memcg == mem. This means we can 2912 * merge a series of uncharges to an uncharge of res_counter. 2913 * If not, we uncharge res_counter ony by one. 2914 */ 2915 if (batch->memcg != mem) 2916 goto direct_uncharge; 2917 /* remember freed charge and uncharge it later */ 2918 batch->nr_pages++; 2919 if (uncharge_memsw) 2920 batch->memsw_nr_pages++; 2921 return; 2922 direct_uncharge: 2923 res_counter_uncharge(&mem->res, nr_pages * PAGE_SIZE); 2924 if (uncharge_memsw) 2925 res_counter_uncharge(&mem->memsw, nr_pages * PAGE_SIZE); 2926 if (unlikely(batch->memcg != mem)) 2927 memcg_oom_recover(mem); 2928 return; 2929 } 2930 2931 /* 2932 * uncharge if !page_mapped(page) 2933 */ 2934 static struct mem_cgroup * 2935 __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype) 2936 { 2937 struct mem_cgroup *mem = NULL; 2938 unsigned int nr_pages = 1; 2939 struct page_cgroup *pc; 2940 2941 if (mem_cgroup_disabled()) 2942 return NULL; 2943 2944 if (PageSwapCache(page)) 2945 return NULL; 2946 2947 if (PageTransHuge(page)) { 2948 nr_pages <<= compound_order(page); 2949 VM_BUG_ON(!PageTransHuge(page)); 2950 } 2951 /* 2952 * Check if our page_cgroup is valid 2953 */ 2954 pc = lookup_page_cgroup(page); 2955 if (unlikely(!pc || !PageCgroupUsed(pc))) 2956 return NULL; 2957 2958 lock_page_cgroup(pc); 2959 2960 mem = pc->mem_cgroup; 2961 2962 if (!PageCgroupUsed(pc)) 2963 goto unlock_out; 2964 2965 switch (ctype) { 2966 case MEM_CGROUP_CHARGE_TYPE_MAPPED: 2967 case MEM_CGROUP_CHARGE_TYPE_DROP: 2968 /* See mem_cgroup_prepare_migration() */ 2969 if (page_mapped(page) || PageCgroupMigration(pc)) 2970 goto unlock_out; 2971 break; 2972 case MEM_CGROUP_CHARGE_TYPE_SWAPOUT: 2973 if (!PageAnon(page)) { /* Shared memory */ 2974 if (page->mapping && !page_is_file_cache(page)) 2975 goto unlock_out; 2976 } else if (page_mapped(page)) /* Anon */ 2977 goto unlock_out; 2978 break; 2979 default: 2980 break; 2981 } 2982 2983 mem_cgroup_charge_statistics(mem, PageCgroupCache(pc), -nr_pages); 2984 2985 ClearPageCgroupUsed(pc); 2986 /* 2987 * pc->mem_cgroup is not cleared here. It will be accessed when it's 2988 * freed from LRU. This is safe because uncharged page is expected not 2989 * to be reused (freed soon). Exception is SwapCache, it's handled by 2990 * special functions. 2991 */ 2992 2993 unlock_page_cgroup(pc); 2994 /* 2995 * even after unlock, we have mem->res.usage here and this memcg 2996 * will never be freed. 2997 */ 2998 memcg_check_events(mem, page); 2999 if (do_swap_account && ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT) { 3000 mem_cgroup_swap_statistics(mem, true); 3001 mem_cgroup_get(mem); 3002 } 3003 if (!mem_cgroup_is_root(mem)) 3004 mem_cgroup_do_uncharge(mem, nr_pages, ctype); 3005 3006 return mem; 3007 3008 unlock_out: 3009 unlock_page_cgroup(pc); 3010 return NULL; 3011 } 3012 3013 void mem_cgroup_uncharge_page(struct page *page) 3014 { 3015 /* early check. 
*/ 3016 if (page_mapped(page)) 3017 return; 3018 if (page->mapping && !PageAnon(page)) 3019 return; 3020 __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_MAPPED); 3021 } 3022 3023 void mem_cgroup_uncharge_cache_page(struct page *page) 3024 { 3025 VM_BUG_ON(page_mapped(page)); 3026 VM_BUG_ON(page->mapping); 3027 __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_CACHE); 3028 } 3029 3030 /* 3031 * Batch_start/batch_end is called in unmap_page_range/invlidate/trucate. 3032 * In that cases, pages are freed continuously and we can expect pages 3033 * are in the same memcg. All these calls itself limits the number of 3034 * pages freed at once, then uncharge_start/end() is called properly. 3035 * This may be called prural(2) times in a context, 3036 */ 3037 3038 void mem_cgroup_uncharge_start(void) 3039 { 3040 current->memcg_batch.do_batch++; 3041 /* We can do nest. */ 3042 if (current->memcg_batch.do_batch == 1) { 3043 current->memcg_batch.memcg = NULL; 3044 current->memcg_batch.nr_pages = 0; 3045 current->memcg_batch.memsw_nr_pages = 0; 3046 } 3047 } 3048 3049 void mem_cgroup_uncharge_end(void) 3050 { 3051 struct memcg_batch_info *batch = ¤t->memcg_batch; 3052 3053 if (!batch->do_batch) 3054 return; 3055 3056 batch->do_batch--; 3057 if (batch->do_batch) /* If stacked, do nothing. */ 3058 return; 3059 3060 if (!batch->memcg) 3061 return; 3062 /* 3063 * This "batch->memcg" is valid without any css_get/put etc... 3064 * bacause we hide charges behind us. 3065 */ 3066 if (batch->nr_pages) 3067 res_counter_uncharge(&batch->memcg->res, 3068 batch->nr_pages * PAGE_SIZE); 3069 if (batch->memsw_nr_pages) 3070 res_counter_uncharge(&batch->memcg->memsw, 3071 batch->memsw_nr_pages * PAGE_SIZE); 3072 memcg_oom_recover(batch->memcg); 3073 /* forget this pointer (for sanity check) */ 3074 batch->memcg = NULL; 3075 } 3076 3077 #ifdef CONFIG_SWAP 3078 /* 3079 * called after __delete_from_swap_cache() and drop "page" account. 3080 * memcg information is recorded to swap_cgroup of "ent" 3081 */ 3082 void 3083 mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout) 3084 { 3085 struct mem_cgroup *memcg; 3086 int ctype = MEM_CGROUP_CHARGE_TYPE_SWAPOUT; 3087 3088 if (!swapout) /* this was a swap cache but the swap is unused ! */ 3089 ctype = MEM_CGROUP_CHARGE_TYPE_DROP; 3090 3091 memcg = __mem_cgroup_uncharge_common(page, ctype); 3092 3093 /* 3094 * record memcg information, if swapout && memcg != NULL, 3095 * mem_cgroup_get() was called in uncharge(). 3096 */ 3097 if (do_swap_account && swapout && memcg) 3098 swap_cgroup_record(ent, css_id(&memcg->css)); 3099 } 3100 #endif 3101 3102 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP 3103 /* 3104 * called from swap_entry_free(). remove record in swap_cgroup and 3105 * uncharge "memsw" account. 3106 */ 3107 void mem_cgroup_uncharge_swap(swp_entry_t ent) 3108 { 3109 struct mem_cgroup *memcg; 3110 unsigned short id; 3111 3112 if (!do_swap_account) 3113 return; 3114 3115 id = swap_cgroup_record(ent, 0); 3116 rcu_read_lock(); 3117 memcg = mem_cgroup_lookup(id); 3118 if (memcg) { 3119 /* 3120 * We uncharge this because swap is freed. 3121 * This memcg can be obsolete one. We avoid calling css_tryget 3122 */ 3123 if (!mem_cgroup_is_root(memcg)) 3124 res_counter_uncharge(&memcg->memsw, PAGE_SIZE); 3125 mem_cgroup_swap_statistics(memcg, false); 3126 mem_cgroup_put(memcg); 3127 } 3128 rcu_read_unlock(); 3129 } 3130 3131 /** 3132 * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record. 
3133 * @entry: swap entry to be moved 3134 * @from: mem_cgroup which the entry is moved from 3135 * @to: mem_cgroup which the entry is moved to 3136 * @need_fixup: whether we should fixup res_counters and refcounts. 3137 * 3138 * It succeeds only when the swap_cgroup's record for this entry is the same 3139 * as the mem_cgroup's id of @from. 3140 * 3141 * Returns 0 on success, -EINVAL on failure. 3142 * 3143 * The caller must have charged to @to, IOW, called res_counter_charge() about 3144 * both res and memsw, and called css_get(). 3145 */ 3146 static int mem_cgroup_move_swap_account(swp_entry_t entry, 3147 struct mem_cgroup *from, struct mem_cgroup *to, bool need_fixup) 3148 { 3149 unsigned short old_id, new_id; 3150 3151 old_id = css_id(&from->css); 3152 new_id = css_id(&to->css); 3153 3154 if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) { 3155 mem_cgroup_swap_statistics(from, false); 3156 mem_cgroup_swap_statistics(to, true); 3157 /* 3158 * This function is only called from task migration context now. 3159 * It postpones res_counter and refcount handling till the end 3160 * of task migration(mem_cgroup_clear_mc()) for performance 3161 * improvement. But we cannot postpone mem_cgroup_get(to) 3162 * because if the process that has been moved to @to does 3163 * swap-in, the refcount of @to might be decreased to 0. 3164 */ 3165 mem_cgroup_get(to); 3166 if (need_fixup) { 3167 if (!mem_cgroup_is_root(from)) 3168 res_counter_uncharge(&from->memsw, PAGE_SIZE); 3169 mem_cgroup_put(from); 3170 /* 3171 * we charged both to->res and to->memsw, so we should 3172 * uncharge to->res. 3173 */ 3174 if (!mem_cgroup_is_root(to)) 3175 res_counter_uncharge(&to->res, PAGE_SIZE); 3176 } 3177 return 0; 3178 } 3179 return -EINVAL; 3180 } 3181 #else 3182 static inline int mem_cgroup_move_swap_account(swp_entry_t entry, 3183 struct mem_cgroup *from, struct mem_cgroup *to, bool need_fixup) 3184 { 3185 return -EINVAL; 3186 } 3187 #endif 3188 3189 /* 3190 * Before starting migration, account PAGE_SIZE to mem_cgroup that the old 3191 * page belongs to. 3192 */ 3193 int mem_cgroup_prepare_migration(struct page *page, 3194 struct page *newpage, struct mem_cgroup **ptr, gfp_t gfp_mask) 3195 { 3196 struct mem_cgroup *mem = NULL; 3197 struct page_cgroup *pc; 3198 enum charge_type ctype; 3199 int ret = 0; 3200 3201 *ptr = NULL; 3202 3203 VM_BUG_ON(PageTransHuge(page)); 3204 if (mem_cgroup_disabled()) 3205 return 0; 3206 3207 pc = lookup_page_cgroup(page); 3208 lock_page_cgroup(pc); 3209 if (PageCgroupUsed(pc)) { 3210 mem = pc->mem_cgroup; 3211 css_get(&mem->css); 3212 /* 3213 * At migrating an anonymous page, its mapcount goes down 3214 * to 0 and uncharge() will be called. But, even if it's fully 3215 * unmapped, migration may fail and this page has to be 3216 * charged again. We set MIGRATION flag here and delay uncharge 3217 * until end_migration() is called 3218 * 3219 * Corner Case Thinking 3220 * A) 3221 * When the old page was mapped as Anon and it's unmap-and-freed 3222 * while migration was ongoing. 3223 * If unmap finds the old page, uncharge() of it will be delayed 3224 * until end_migration(). If unmap finds a new page, it's 3225 * uncharged when it make mapcount to be 1->0. If unmap code 3226 * finds swap_migration_entry, the new page will not be mapped 3227 * and end_migration() will find it(mapcount==0). 3228 * 3229 * B) 3230 * When the old page was mapped but migraion fails, the kernel 3231 * remaps it. A charge for it is kept by MIGRATION flag even 3232 * if mapcount goes down to 0. 
We can do remap successfully 3233 * without charging it again. 3234 * 3235 * C) 3236 * The "old" page is under lock_page() until the end of 3237 * migration, so, the old page itself will not be swapped-out. 3238 * If the new page is swapped out before end_migraton, our 3239 * hook to usual swap-out path will catch the event. 3240 */ 3241 if (PageAnon(page)) 3242 SetPageCgroupMigration(pc); 3243 } 3244 unlock_page_cgroup(pc); 3245 /* 3246 * If the page is not charged at this point, 3247 * we return here. 3248 */ 3249 if (!mem) 3250 return 0; 3251 3252 *ptr = mem; 3253 ret = __mem_cgroup_try_charge(NULL, gfp_mask, 1, ptr, false); 3254 css_put(&mem->css);/* drop extra refcnt */ 3255 if (ret || *ptr == NULL) { 3256 if (PageAnon(page)) { 3257 lock_page_cgroup(pc); 3258 ClearPageCgroupMigration(pc); 3259 unlock_page_cgroup(pc); 3260 /* 3261 * The old page may be fully unmapped while we kept it. 3262 */ 3263 mem_cgroup_uncharge_page(page); 3264 } 3265 return -ENOMEM; 3266 } 3267 /* 3268 * We charge new page before it's used/mapped. So, even if unlock_page() 3269 * is called before end_migration, we can catch all events on this new 3270 * page. In the case new page is migrated but not remapped, new page's 3271 * mapcount will be finally 0 and we call uncharge in end_migration(). 3272 */ 3273 pc = lookup_page_cgroup(newpage); 3274 if (PageAnon(page)) 3275 ctype = MEM_CGROUP_CHARGE_TYPE_MAPPED; 3276 else if (page_is_file_cache(page)) 3277 ctype = MEM_CGROUP_CHARGE_TYPE_CACHE; 3278 else 3279 ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM; 3280 __mem_cgroup_commit_charge(mem, page, 1, pc, ctype); 3281 return ret; 3282 } 3283 3284 /* remove redundant charge if migration failed*/ 3285 void mem_cgroup_end_migration(struct mem_cgroup *mem, 3286 struct page *oldpage, struct page *newpage, bool migration_ok) 3287 { 3288 struct page *used, *unused; 3289 struct page_cgroup *pc; 3290 3291 if (!mem) 3292 return; 3293 /* blocks rmdir() */ 3294 cgroup_exclude_rmdir(&mem->css); 3295 if (!migration_ok) { 3296 used = oldpage; 3297 unused = newpage; 3298 } else { 3299 used = newpage; 3300 unused = oldpage; 3301 } 3302 /* 3303 * We disallowed uncharge of pages under migration because mapcount 3304 * of the page goes down to zero, temporarly. 3305 * Clear the flag and check the page should be charged. 3306 */ 3307 pc = lookup_page_cgroup(oldpage); 3308 lock_page_cgroup(pc); 3309 ClearPageCgroupMigration(pc); 3310 unlock_page_cgroup(pc); 3311 3312 __mem_cgroup_uncharge_common(unused, MEM_CGROUP_CHARGE_TYPE_FORCE); 3313 3314 /* 3315 * If a page is a file cache, radix-tree replacement is very atomic 3316 * and we can skip this check. When it was an Anon page, its mapcount 3317 * goes down to 0. But because we added MIGRATION flage, it's not 3318 * uncharged yet. There are several case but page->mapcount check 3319 * and USED bit check in mem_cgroup_uncharge_page() will do enough 3320 * check. (see prepare_charge() also) 3321 */ 3322 if (PageAnon(used)) 3323 mem_cgroup_uncharge_page(used); 3324 /* 3325 * At migration, we may charge account against cgroup which has no 3326 * tasks. 3327 * So, rmdir()->pre_destroy() can be called while we do this charge. 3328 * In that case, we need to call pre_destroy() again. check it here. 3329 */ 3330 cgroup_release_and_wakeup_rmdir(&mem->css); 3331 } 3332 3333 /* 3334 * A call to try to shrink memory usage on charge failure at shmem's swapin. 
3335 * Calling hierarchical_reclaim is not enough because we should update 3336 * last_oom_jiffies to prevent pagefault_out_of_memory from invoking global OOM. 3337 * Moreover considering hierarchy, we should reclaim from the mem_over_limit, 3338 * not from the memcg which this page would be charged to. 3339 * try_charge_swapin does all of these works properly. 3340 */ 3341 int mem_cgroup_shmem_charge_fallback(struct page *page, 3342 struct mm_struct *mm, 3343 gfp_t gfp_mask) 3344 { 3345 struct mem_cgroup *mem; 3346 int ret; 3347 3348 if (mem_cgroup_disabled()) 3349 return 0; 3350 3351 ret = mem_cgroup_try_charge_swapin(mm, page, gfp_mask, &mem); 3352 if (!ret) 3353 mem_cgroup_cancel_charge_swapin(mem); /* it does !mem check */ 3354 3355 return ret; 3356 } 3357 3358 #ifdef CONFIG_DEBUG_VM 3359 static struct page_cgroup *lookup_page_cgroup_used(struct page *page) 3360 { 3361 struct page_cgroup *pc; 3362 3363 pc = lookup_page_cgroup(page); 3364 if (likely(pc) && PageCgroupUsed(pc)) 3365 return pc; 3366 return NULL; 3367 } 3368 3369 bool mem_cgroup_bad_page_check(struct page *page) 3370 { 3371 if (mem_cgroup_disabled()) 3372 return false; 3373 3374 return lookup_page_cgroup_used(page) != NULL; 3375 } 3376 3377 void mem_cgroup_print_bad_page(struct page *page) 3378 { 3379 struct page_cgroup *pc; 3380 3381 pc = lookup_page_cgroup_used(page); 3382 if (pc) { 3383 int ret = -1; 3384 char *path; 3385 3386 printk(KERN_ALERT "pc:%p pc->flags:%lx pc->mem_cgroup:%p", 3387 pc, pc->flags, pc->mem_cgroup); 3388 3389 path = kmalloc(PATH_MAX, GFP_KERNEL); 3390 if (path) { 3391 rcu_read_lock(); 3392 ret = cgroup_path(pc->mem_cgroup->css.cgroup, 3393 path, PATH_MAX); 3394 rcu_read_unlock(); 3395 } 3396 3397 printk(KERN_CONT "(%s)\n", 3398 (ret < 0) ? "cannot get the path" : path); 3399 kfree(path); 3400 } 3401 } 3402 #endif 3403 3404 static DEFINE_MUTEX(set_limit_mutex); 3405 3406 static int mem_cgroup_resize_limit(struct mem_cgroup *memcg, 3407 unsigned long long val) 3408 { 3409 int retry_count; 3410 u64 memswlimit, memlimit; 3411 int ret = 0; 3412 int children = mem_cgroup_count_children(memcg); 3413 u64 curusage, oldusage; 3414 int enlarge; 3415 3416 /* 3417 * For keeping hierarchical_reclaim simple, how long we should retry 3418 * is depends on callers. We set our retry-count to be function 3419 * of # of children which we should visit in this loop. 3420 */ 3421 retry_count = MEM_CGROUP_RECLAIM_RETRIES * children; 3422 3423 oldusage = res_counter_read_u64(&memcg->res, RES_USAGE); 3424 3425 enlarge = 0; 3426 while (retry_count) { 3427 if (signal_pending(current)) { 3428 ret = -EINTR; 3429 break; 3430 } 3431 /* 3432 * Rather than hide all in some function, I do this in 3433 * open coded manner. You see what this really does. 3434 * We have to guarantee mem->res.limit < mem->memsw.limit. 
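 *
 * The userspace-visible consequence (illustrative, using the usual
 * memory.limit_in_bytes / memory.memsw.limit_in_bytes file names):
 * with memsw.limit at 512M, an attempt to raise limit_in_bytes to 1G
 * fails with -EINVAL here; to grow both limits, memsw.limit_in_bytes
 * has to be raised first, and to shrink both, limit_in_bytes has to
 * be lowered first.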
3435 */ 3436 mutex_lock(&set_limit_mutex); 3437 memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT); 3438 if (memswlimit < val) { 3439 ret = -EINVAL; 3440 mutex_unlock(&set_limit_mutex); 3441 break; 3442 } 3443 3444 memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT); 3445 if (memlimit < val) 3446 enlarge = 1; 3447 3448 ret = res_counter_set_limit(&memcg->res, val); 3449 if (!ret) { 3450 if (memswlimit == val) 3451 memcg->memsw_is_minimum = true; 3452 else 3453 memcg->memsw_is_minimum = false; 3454 } 3455 mutex_unlock(&set_limit_mutex); 3456 3457 if (!ret) 3458 break; 3459 3460 mem_cgroup_hierarchical_reclaim(memcg, NULL, GFP_KERNEL, 3461 MEM_CGROUP_RECLAIM_SHRINK, 3462 NULL); 3463 curusage = res_counter_read_u64(&memcg->res, RES_USAGE); 3464 /* Usage is reduced ? */ 3465 if (curusage >= oldusage) 3466 retry_count--; 3467 else 3468 oldusage = curusage; 3469 } 3470 if (!ret && enlarge) 3471 memcg_oom_recover(memcg); 3472 3473 return ret; 3474 } 3475 3476 static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg, 3477 unsigned long long val) 3478 { 3479 int retry_count; 3480 u64 memlimit, memswlimit, oldusage, curusage; 3481 int children = mem_cgroup_count_children(memcg); 3482 int ret = -EBUSY; 3483 int enlarge = 0; 3484 3485 /* see mem_cgroup_resize_res_limit */ 3486 retry_count = children * MEM_CGROUP_RECLAIM_RETRIES; 3487 oldusage = res_counter_read_u64(&memcg->memsw, RES_USAGE); 3488 while (retry_count) { 3489 if (signal_pending(current)) { 3490 ret = -EINTR; 3491 break; 3492 } 3493 /* 3494 * Rather than hide all in some function, I do this in 3495 * open coded manner. You see what this really does. 3496 * We have to guarantee mem->res.limit < mem->memsw.limit. 3497 */ 3498 mutex_lock(&set_limit_mutex); 3499 memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT); 3500 if (memlimit > val) { 3501 ret = -EINVAL; 3502 mutex_unlock(&set_limit_mutex); 3503 break; 3504 } 3505 memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT); 3506 if (memswlimit < val) 3507 enlarge = 1; 3508 ret = res_counter_set_limit(&memcg->memsw, val); 3509 if (!ret) { 3510 if (memlimit == val) 3511 memcg->memsw_is_minimum = true; 3512 else 3513 memcg->memsw_is_minimum = false; 3514 } 3515 mutex_unlock(&set_limit_mutex); 3516 3517 if (!ret) 3518 break; 3519 3520 mem_cgroup_hierarchical_reclaim(memcg, NULL, GFP_KERNEL, 3521 MEM_CGROUP_RECLAIM_NOSWAP | 3522 MEM_CGROUP_RECLAIM_SHRINK, 3523 NULL); 3524 curusage = res_counter_read_u64(&memcg->memsw, RES_USAGE); 3525 /* Usage is reduced ? 
*/ 3526 if (curusage >= oldusage) 3527 retry_count--; 3528 else 3529 oldusage = curusage; 3530 } 3531 if (!ret && enlarge) 3532 memcg_oom_recover(memcg); 3533 return ret; 3534 } 3535 3536 unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order, 3537 gfp_t gfp_mask, 3538 unsigned long *total_scanned) 3539 { 3540 unsigned long nr_reclaimed = 0; 3541 struct mem_cgroup_per_zone *mz, *next_mz = NULL; 3542 unsigned long reclaimed; 3543 int loop = 0; 3544 struct mem_cgroup_tree_per_zone *mctz; 3545 unsigned long long excess; 3546 unsigned long nr_scanned; 3547 3548 if (order > 0) 3549 return 0; 3550 3551 mctz = soft_limit_tree_node_zone(zone_to_nid(zone), zone_idx(zone)); 3552 /* 3553 * This loop can run a while, specially if mem_cgroup's continuously 3554 * keep exceeding their soft limit and putting the system under 3555 * pressure 3556 */ 3557 do { 3558 if (next_mz) 3559 mz = next_mz; 3560 else 3561 mz = mem_cgroup_largest_soft_limit_node(mctz); 3562 if (!mz) 3563 break; 3564 3565 nr_scanned = 0; 3566 reclaimed = mem_cgroup_hierarchical_reclaim(mz->mem, zone, 3567 gfp_mask, 3568 MEM_CGROUP_RECLAIM_SOFT, 3569 &nr_scanned); 3570 nr_reclaimed += reclaimed; 3571 *total_scanned += nr_scanned; 3572 spin_lock(&mctz->lock); 3573 3574 /* 3575 * If we failed to reclaim anything from this memory cgroup 3576 * it is time to move on to the next cgroup 3577 */ 3578 next_mz = NULL; 3579 if (!reclaimed) { 3580 do { 3581 /* 3582 * Loop until we find yet another one. 3583 * 3584 * By the time we get the soft_limit lock 3585 * again, someone might have aded the 3586 * group back on the RB tree. Iterate to 3587 * make sure we get a different mem. 3588 * mem_cgroup_largest_soft_limit_node returns 3589 * NULL if no other cgroup is present on 3590 * the tree 3591 */ 3592 next_mz = 3593 __mem_cgroup_largest_soft_limit_node(mctz); 3594 if (next_mz == mz) 3595 css_put(&next_mz->mem->css); 3596 else /* next_mz == NULL or other memcg */ 3597 break; 3598 } while (1); 3599 } 3600 __mem_cgroup_remove_exceeded(mz->mem, mz, mctz); 3601 excess = res_counter_soft_limit_excess(&mz->mem->res); 3602 /* 3603 * One school of thought says that we should not add 3604 * back the node to the tree if reclaim returns 0. 3605 * But our reclaim could return 0, simply because due 3606 * to priority we are exposing a smaller subset of 3607 * memory to reclaim from. Consider this as a longer 3608 * term TODO. 3609 */ 3610 /* If excess == 0, no tree ops */ 3611 __mem_cgroup_insert_exceeded(mz->mem, mz, mctz, excess); 3612 spin_unlock(&mctz->lock); 3613 css_put(&mz->mem->css); 3614 loop++; 3615 /* 3616 * Could not reclaim anything and there are no more 3617 * mem cgroups to try or we seem to be looping without 3618 * reclaiming anything. 3619 */ 3620 if (!nr_reclaimed && 3621 (next_mz == NULL || 3622 loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS)) 3623 break; 3624 } while (!nr_reclaimed); 3625 if (next_mz) 3626 css_put(&next_mz->mem->css); 3627 return nr_reclaimed; 3628 } 3629 3630 /* 3631 * This routine traverse page_cgroup in given list and drop them all. 3632 * *And* this routine doesn't reclaim page itself, just removes page_cgroup. 
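 *
 * "Drop" here means reparent, not reclaim: each page found on the
 * per-zone LRU list is isolated and handed to mem_cgroup_move_parent(),
 * so its charge moves up the hierarchy while the page itself stays in
 * memory.  Entries that come back with -EBUSY or -EINVAL (lock
 * contention, or a page_cgroup whose owner changed under us) are kept
 * as "busy" and simply retried on a later pass.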
3633 */ 3634 static int mem_cgroup_force_empty_list(struct mem_cgroup *mem, 3635 int node, int zid, enum lru_list lru) 3636 { 3637 struct zone *zone; 3638 struct mem_cgroup_per_zone *mz; 3639 struct page_cgroup *pc, *busy; 3640 unsigned long flags, loop; 3641 struct list_head *list; 3642 int ret = 0; 3643 3644 zone = &NODE_DATA(node)->node_zones[zid]; 3645 mz = mem_cgroup_zoneinfo(mem, node, zid); 3646 list = &mz->lists[lru]; 3647 3648 loop = MEM_CGROUP_ZSTAT(mz, lru); 3649 /* give some margin against EBUSY etc...*/ 3650 loop += 256; 3651 busy = NULL; 3652 while (loop--) { 3653 struct page *page; 3654 3655 ret = 0; 3656 spin_lock_irqsave(&zone->lru_lock, flags); 3657 if (list_empty(list)) { 3658 spin_unlock_irqrestore(&zone->lru_lock, flags); 3659 break; 3660 } 3661 pc = list_entry(list->prev, struct page_cgroup, lru); 3662 if (busy == pc) { 3663 list_move(&pc->lru, list); 3664 busy = NULL; 3665 spin_unlock_irqrestore(&zone->lru_lock, flags); 3666 continue; 3667 } 3668 spin_unlock_irqrestore(&zone->lru_lock, flags); 3669 3670 page = lookup_cgroup_page(pc); 3671 3672 ret = mem_cgroup_move_parent(page, pc, mem, GFP_KERNEL); 3673 if (ret == -ENOMEM) 3674 break; 3675 3676 if (ret == -EBUSY || ret == -EINVAL) { 3677 /* found lock contention or "pc" is obsolete. */ 3678 busy = pc; 3679 cond_resched(); 3680 } else 3681 busy = NULL; 3682 } 3683 3684 if (!ret && !list_empty(list)) 3685 return -EBUSY; 3686 return ret; 3687 } 3688 3689 /* 3690 * make mem_cgroup's charge to be 0 if there is no task. 3691 * This enables deleting this mem_cgroup. 3692 */ 3693 static int mem_cgroup_force_empty(struct mem_cgroup *mem, bool free_all) 3694 { 3695 int ret; 3696 int node, zid, shrink; 3697 int nr_retries = MEM_CGROUP_RECLAIM_RETRIES; 3698 struct cgroup *cgrp = mem->css.cgroup; 3699 3700 css_get(&mem->css); 3701 3702 shrink = 0; 3703 /* should free all ? */ 3704 if (free_all) 3705 goto try_to_free; 3706 move_account: 3707 do { 3708 ret = -EBUSY; 3709 if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children)) 3710 goto out; 3711 ret = -EINTR; 3712 if (signal_pending(current)) 3713 goto out; 3714 /* This is for making all *used* pages to be on LRU. */ 3715 lru_add_drain_all(); 3716 drain_all_stock_sync(); 3717 ret = 0; 3718 mem_cgroup_start_move(mem); 3719 for_each_node_state(node, N_HIGH_MEMORY) { 3720 for (zid = 0; !ret && zid < MAX_NR_ZONES; zid++) { 3721 enum lru_list l; 3722 for_each_lru(l) { 3723 ret = mem_cgroup_force_empty_list(mem, 3724 node, zid, l); 3725 if (ret) 3726 break; 3727 } 3728 } 3729 if (ret) 3730 break; 3731 } 3732 mem_cgroup_end_move(mem); 3733 memcg_oom_recover(mem); 3734 /* it seems parent cgroup doesn't have enough mem */ 3735 if (ret == -ENOMEM) 3736 goto try_to_free; 3737 cond_resched(); 3738 /* "ret" should also be checked to ensure all lists are empty. */ 3739 } while (mem->res.usage > 0 || ret); 3740 out: 3741 css_put(&mem->css); 3742 return ret; 3743 3744 try_to_free: 3745 /* returns EBUSY if there is a task or if we come here twice. 
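 *
 * (This label is reached either directly, when the caller asked for
 * free_all -- i.e. an explicit write to the force_empty control file --
 * or from the move_account loop above when the parent could not absorb
 * the charges.  "Come here twice" is what the shrink flag below
 * detects, so a second fall-through turns into -EBUSY instead of
 * looping forever.)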
*/ 3746 if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children) || shrink) { 3747 ret = -EBUSY; 3748 goto out; 3749 } 3750 /* we call try-to-free pages for make this cgroup empty */ 3751 lru_add_drain_all(); 3752 /* try to free all pages in this cgroup */ 3753 shrink = 1; 3754 while (nr_retries && mem->res.usage > 0) { 3755 int progress; 3756 3757 if (signal_pending(current)) { 3758 ret = -EINTR; 3759 goto out; 3760 } 3761 progress = try_to_free_mem_cgroup_pages(mem, GFP_KERNEL, 3762 false, get_swappiness(mem)); 3763 if (!progress) { 3764 nr_retries--; 3765 /* maybe some writeback is necessary */ 3766 congestion_wait(BLK_RW_ASYNC, HZ/10); 3767 } 3768 3769 } 3770 lru_add_drain(); 3771 /* try move_account...there may be some *locked* pages. */ 3772 goto move_account; 3773 } 3774 3775 int mem_cgroup_force_empty_write(struct cgroup *cont, unsigned int event) 3776 { 3777 return mem_cgroup_force_empty(mem_cgroup_from_cont(cont), true); 3778 } 3779 3780 3781 static u64 mem_cgroup_hierarchy_read(struct cgroup *cont, struct cftype *cft) 3782 { 3783 return mem_cgroup_from_cont(cont)->use_hierarchy; 3784 } 3785 3786 static int mem_cgroup_hierarchy_write(struct cgroup *cont, struct cftype *cft, 3787 u64 val) 3788 { 3789 int retval = 0; 3790 struct mem_cgroup *mem = mem_cgroup_from_cont(cont); 3791 struct cgroup *parent = cont->parent; 3792 struct mem_cgroup *parent_mem = NULL; 3793 3794 if (parent) 3795 parent_mem = mem_cgroup_from_cont(parent); 3796 3797 cgroup_lock(); 3798 /* 3799 * If parent's use_hierarchy is set, we can't make any modifications 3800 * in the child subtrees. If it is unset, then the change can 3801 * occur, provided the current cgroup has no children. 3802 * 3803 * For the root cgroup, parent_mem is NULL, we allow value to be 3804 * set if there are no children. 3805 */ 3806 if ((!parent_mem || !parent_mem->use_hierarchy) && 3807 (val == 1 || val == 0)) { 3808 if (list_empty(&cont->children)) 3809 mem->use_hierarchy = val; 3810 else 3811 retval = -EBUSY; 3812 } else 3813 retval = -EINVAL; 3814 cgroup_unlock(); 3815 3816 return retval; 3817 } 3818 3819 3820 static unsigned long mem_cgroup_recursive_stat(struct mem_cgroup *mem, 3821 enum mem_cgroup_stat_index idx) 3822 { 3823 struct mem_cgroup *iter; 3824 long val = 0; 3825 3826 /* Per-cpu values can be negative, use a signed accumulator */ 3827 for_each_mem_cgroup_tree(iter, mem) 3828 val += mem_cgroup_read_stat(iter, idx); 3829 3830 if (val < 0) /* race ? 
*/ 3831 val = 0; 3832 return val; 3833 } 3834 3835 static inline u64 mem_cgroup_usage(struct mem_cgroup *mem, bool swap) 3836 { 3837 u64 val; 3838 3839 if (!mem_cgroup_is_root(mem)) { 3840 if (!swap) 3841 return res_counter_read_u64(&mem->res, RES_USAGE); 3842 else 3843 return res_counter_read_u64(&mem->memsw, RES_USAGE); 3844 } 3845 3846 val = mem_cgroup_recursive_stat(mem, MEM_CGROUP_STAT_CACHE); 3847 val += mem_cgroup_recursive_stat(mem, MEM_CGROUP_STAT_RSS); 3848 3849 if (swap) 3850 val += mem_cgroup_recursive_stat(mem, MEM_CGROUP_STAT_SWAPOUT); 3851 3852 return val << PAGE_SHIFT; 3853 } 3854 3855 static u64 mem_cgroup_read(struct cgroup *cont, struct cftype *cft) 3856 { 3857 struct mem_cgroup *mem = mem_cgroup_from_cont(cont); 3858 u64 val; 3859 int type, name; 3860 3861 type = MEMFILE_TYPE(cft->private); 3862 name = MEMFILE_ATTR(cft->private); 3863 switch (type) { 3864 case _MEM: 3865 if (name == RES_USAGE) 3866 val = mem_cgroup_usage(mem, false); 3867 else 3868 val = res_counter_read_u64(&mem->res, name); 3869 break; 3870 case _MEMSWAP: 3871 if (name == RES_USAGE) 3872 val = mem_cgroup_usage(mem, true); 3873 else 3874 val = res_counter_read_u64(&mem->memsw, name); 3875 break; 3876 default: 3877 BUG(); 3878 break; 3879 } 3880 return val; 3881 } 3882 /* 3883 * The user of this function is... 3884 * RES_LIMIT. 3885 */ 3886 static int mem_cgroup_write(struct cgroup *cont, struct cftype *cft, 3887 const char *buffer) 3888 { 3889 struct mem_cgroup *memcg = mem_cgroup_from_cont(cont); 3890 int type, name; 3891 unsigned long long val; 3892 int ret; 3893 3894 type = MEMFILE_TYPE(cft->private); 3895 name = MEMFILE_ATTR(cft->private); 3896 switch (name) { 3897 case RES_LIMIT: 3898 if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */ 3899 ret = -EINVAL; 3900 break; 3901 } 3902 /* This function does all necessary parse...reuse it */ 3903 ret = res_counter_memparse_write_strategy(buffer, &val); 3904 if (ret) 3905 break; 3906 if (type == _MEM) 3907 ret = mem_cgroup_resize_limit(memcg, val); 3908 else 3909 ret = mem_cgroup_resize_memsw_limit(memcg, val); 3910 break; 3911 case RES_SOFT_LIMIT: 3912 ret = res_counter_memparse_write_strategy(buffer, &val); 3913 if (ret) 3914 break; 3915 /* 3916 * For memsw, soft limits are hard to implement in terms 3917 * of semantics, for now, we support soft limits for 3918 * control without swap 3919 */ 3920 if (type == _MEM) 3921 ret = res_counter_set_soft_limit(&memcg->res, val); 3922 else 3923 ret = -EINVAL; 3924 break; 3925 default: 3926 ret = -EINVAL; /* should be BUG() ? 
*/ 3927 break; 3928 } 3929 return ret; 3930 } 3931 3932 static void memcg_get_hierarchical_limit(struct mem_cgroup *memcg, 3933 unsigned long long *mem_limit, unsigned long long *memsw_limit) 3934 { 3935 struct cgroup *cgroup; 3936 unsigned long long min_limit, min_memsw_limit, tmp; 3937 3938 min_limit = res_counter_read_u64(&memcg->res, RES_LIMIT); 3939 min_memsw_limit = res_counter_read_u64(&memcg->memsw, RES_LIMIT); 3940 cgroup = memcg->css.cgroup; 3941 if (!memcg->use_hierarchy) 3942 goto out; 3943 3944 while (cgroup->parent) { 3945 cgroup = cgroup->parent; 3946 memcg = mem_cgroup_from_cont(cgroup); 3947 if (!memcg->use_hierarchy) 3948 break; 3949 tmp = res_counter_read_u64(&memcg->res, RES_LIMIT); 3950 min_limit = min(min_limit, tmp); 3951 tmp = res_counter_read_u64(&memcg->memsw, RES_LIMIT); 3952 min_memsw_limit = min(min_memsw_limit, tmp); 3953 } 3954 out: 3955 *mem_limit = min_limit; 3956 *memsw_limit = min_memsw_limit; 3957 return; 3958 } 3959 3960 static int mem_cgroup_reset(struct cgroup *cont, unsigned int event) 3961 { 3962 struct mem_cgroup *mem; 3963 int type, name; 3964 3965 mem = mem_cgroup_from_cont(cont); 3966 type = MEMFILE_TYPE(event); 3967 name = MEMFILE_ATTR(event); 3968 switch (name) { 3969 case RES_MAX_USAGE: 3970 if (type == _MEM) 3971 res_counter_reset_max(&mem->res); 3972 else 3973 res_counter_reset_max(&mem->memsw); 3974 break; 3975 case RES_FAILCNT: 3976 if (type == _MEM) 3977 res_counter_reset_failcnt(&mem->res); 3978 else 3979 res_counter_reset_failcnt(&mem->memsw); 3980 break; 3981 } 3982 3983 return 0; 3984 } 3985 3986 static u64 mem_cgroup_move_charge_read(struct cgroup *cgrp, 3987 struct cftype *cft) 3988 { 3989 return mem_cgroup_from_cont(cgrp)->move_charge_at_immigrate; 3990 } 3991 3992 #ifdef CONFIG_MMU 3993 static int mem_cgroup_move_charge_write(struct cgroup *cgrp, 3994 struct cftype *cft, u64 val) 3995 { 3996 struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp); 3997 3998 if (val >= (1 << NR_MOVE_TYPE)) 3999 return -EINVAL; 4000 /* 4001 * We check this value several times in both in can_attach() and 4002 * attach(), so we need cgroup lock to prevent this value from being 4003 * inconsistent. 
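 * (The value written is a bitmask of move types: bit 0 enables moving
 * charges of anonymous pages, bit 1 of file pages, so for example
 * "echo 3 > memory.move_charge_at_immigrate" enables both.  Any value
 * with a bit at or above NR_MOVE_TYPE was already rejected above.)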
4004 */ 4005 cgroup_lock(); 4006 mem->move_charge_at_immigrate = val; 4007 cgroup_unlock(); 4008 4009 return 0; 4010 } 4011 #else 4012 static int mem_cgroup_move_charge_write(struct cgroup *cgrp, 4013 struct cftype *cft, u64 val) 4014 { 4015 return -ENOSYS; 4016 } 4017 #endif 4018 4019 4020 /* For read statistics */ 4021 enum { 4022 MCS_CACHE, 4023 MCS_RSS, 4024 MCS_FILE_MAPPED, 4025 MCS_PGPGIN, 4026 MCS_PGPGOUT, 4027 MCS_SWAP, 4028 MCS_PGFAULT, 4029 MCS_PGMAJFAULT, 4030 MCS_INACTIVE_ANON, 4031 MCS_ACTIVE_ANON, 4032 MCS_INACTIVE_FILE, 4033 MCS_ACTIVE_FILE, 4034 MCS_UNEVICTABLE, 4035 NR_MCS_STAT, 4036 }; 4037 4038 struct mcs_total_stat { 4039 s64 stat[NR_MCS_STAT]; 4040 }; 4041 4042 struct { 4043 char *local_name; 4044 char *total_name; 4045 } memcg_stat_strings[NR_MCS_STAT] = { 4046 {"cache", "total_cache"}, 4047 {"rss", "total_rss"}, 4048 {"mapped_file", "total_mapped_file"}, 4049 {"pgpgin", "total_pgpgin"}, 4050 {"pgpgout", "total_pgpgout"}, 4051 {"swap", "total_swap"}, 4052 {"pgfault", "total_pgfault"}, 4053 {"pgmajfault", "total_pgmajfault"}, 4054 {"inactive_anon", "total_inactive_anon"}, 4055 {"active_anon", "total_active_anon"}, 4056 {"inactive_file", "total_inactive_file"}, 4057 {"active_file", "total_active_file"}, 4058 {"unevictable", "total_unevictable"} 4059 }; 4060 4061 4062 static void 4063 mem_cgroup_get_local_stat(struct mem_cgroup *mem, struct mcs_total_stat *s) 4064 { 4065 s64 val; 4066 4067 /* per cpu stat */ 4068 val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_CACHE); 4069 s->stat[MCS_CACHE] += val * PAGE_SIZE; 4070 val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_RSS); 4071 s->stat[MCS_RSS] += val * PAGE_SIZE; 4072 val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_FILE_MAPPED); 4073 s->stat[MCS_FILE_MAPPED] += val * PAGE_SIZE; 4074 val = mem_cgroup_read_events(mem, MEM_CGROUP_EVENTS_PGPGIN); 4075 s->stat[MCS_PGPGIN] += val; 4076 val = mem_cgroup_read_events(mem, MEM_CGROUP_EVENTS_PGPGOUT); 4077 s->stat[MCS_PGPGOUT] += val; 4078 if (do_swap_account) { 4079 val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_SWAPOUT); 4080 s->stat[MCS_SWAP] += val * PAGE_SIZE; 4081 } 4082 val = mem_cgroup_read_events(mem, MEM_CGROUP_EVENTS_PGFAULT); 4083 s->stat[MCS_PGFAULT] += val; 4084 val = mem_cgroup_read_events(mem, MEM_CGROUP_EVENTS_PGMAJFAULT); 4085 s->stat[MCS_PGMAJFAULT] += val; 4086 4087 /* per zone stat */ 4088 val = mem_cgroup_get_local_zonestat(mem, LRU_INACTIVE_ANON); 4089 s->stat[MCS_INACTIVE_ANON] += val * PAGE_SIZE; 4090 val = mem_cgroup_get_local_zonestat(mem, LRU_ACTIVE_ANON); 4091 s->stat[MCS_ACTIVE_ANON] += val * PAGE_SIZE; 4092 val = mem_cgroup_get_local_zonestat(mem, LRU_INACTIVE_FILE); 4093 s->stat[MCS_INACTIVE_FILE] += val * PAGE_SIZE; 4094 val = mem_cgroup_get_local_zonestat(mem, LRU_ACTIVE_FILE); 4095 s->stat[MCS_ACTIVE_FILE] += val * PAGE_SIZE; 4096 val = mem_cgroup_get_local_zonestat(mem, LRU_UNEVICTABLE); 4097 s->stat[MCS_UNEVICTABLE] += val * PAGE_SIZE; 4098 } 4099 4100 static void 4101 mem_cgroup_get_total_stat(struct mem_cgroup *mem, struct mcs_total_stat *s) 4102 { 4103 struct mem_cgroup *iter; 4104 4105 for_each_mem_cgroup_tree(iter, mem) 4106 mem_cgroup_get_local_stat(iter, s); 4107 } 4108 4109 #ifdef CONFIG_NUMA 4110 static int mem_control_numa_stat_show(struct seq_file *m, void *arg) 4111 { 4112 int nid; 4113 unsigned long total_nr, file_nr, anon_nr, unevictable_nr; 4114 unsigned long node_nr; 4115 struct cgroup *cont = m->private; 4116 struct mem_cgroup *mem_cont = mem_cgroup_from_cont(cont); 4117 4118 total_nr = mem_cgroup_nr_lru_pages(mem_cont); 4119 
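	/*
	 * Illustrative output format of memory.numa_stat as produced by the
	 * seq_printf() calls below (one per-node breakdown per line; the set
	 * of nodes depends on the machine):
	 *
	 *	total=<pages> N0=<pages> N1=<pages> ...
	 *	file=<pages> N0=<pages> N1=<pages> ...
	 *	anon=<pages> N0=<pages> N1=<pages> ...
	 *	unevictable=<pages> N0=<pages> N1=<pages> ...
	 */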
seq_printf(m, "total=%lu", total_nr); 4120 for_each_node_state(nid, N_HIGH_MEMORY) { 4121 node_nr = mem_cgroup_node_nr_lru_pages(mem_cont, nid); 4122 seq_printf(m, " N%d=%lu", nid, node_nr); 4123 } 4124 seq_putc(m, '\n'); 4125 4126 file_nr = mem_cgroup_nr_file_lru_pages(mem_cont); 4127 seq_printf(m, "file=%lu", file_nr); 4128 for_each_node_state(nid, N_HIGH_MEMORY) { 4129 node_nr = mem_cgroup_node_nr_file_lru_pages(mem_cont, nid); 4130 seq_printf(m, " N%d=%lu", nid, node_nr); 4131 } 4132 seq_putc(m, '\n'); 4133 4134 anon_nr = mem_cgroup_nr_anon_lru_pages(mem_cont); 4135 seq_printf(m, "anon=%lu", anon_nr); 4136 for_each_node_state(nid, N_HIGH_MEMORY) { 4137 node_nr = mem_cgroup_node_nr_anon_lru_pages(mem_cont, nid); 4138 seq_printf(m, " N%d=%lu", nid, node_nr); 4139 } 4140 seq_putc(m, '\n'); 4141 4142 unevictable_nr = mem_cgroup_nr_unevictable_lru_pages(mem_cont); 4143 seq_printf(m, "unevictable=%lu", unevictable_nr); 4144 for_each_node_state(nid, N_HIGH_MEMORY) { 4145 node_nr = mem_cgroup_node_nr_unevictable_lru_pages(mem_cont, 4146 nid); 4147 seq_printf(m, " N%d=%lu", nid, node_nr); 4148 } 4149 seq_putc(m, '\n'); 4150 return 0; 4151 } 4152 #endif /* CONFIG_NUMA */ 4153 4154 static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft, 4155 struct cgroup_map_cb *cb) 4156 { 4157 struct mem_cgroup *mem_cont = mem_cgroup_from_cont(cont); 4158 struct mcs_total_stat mystat; 4159 int i; 4160 4161 memset(&mystat, 0, sizeof(mystat)); 4162 mem_cgroup_get_local_stat(mem_cont, &mystat); 4163 4164 4165 for (i = 0; i < NR_MCS_STAT; i++) { 4166 if (i == MCS_SWAP && !do_swap_account) 4167 continue; 4168 cb->fill(cb, memcg_stat_strings[i].local_name, mystat.stat[i]); 4169 } 4170 4171 /* Hierarchical information */ 4172 { 4173 unsigned long long limit, memsw_limit; 4174 memcg_get_hierarchical_limit(mem_cont, &limit, &memsw_limit); 4175 cb->fill(cb, "hierarchical_memory_limit", limit); 4176 if (do_swap_account) 4177 cb->fill(cb, "hierarchical_memsw_limit", memsw_limit); 4178 } 4179 4180 memset(&mystat, 0, sizeof(mystat)); 4181 mem_cgroup_get_total_stat(mem_cont, &mystat); 4182 for (i = 0; i < NR_MCS_STAT; i++) { 4183 if (i == MCS_SWAP && !do_swap_account) 4184 continue; 4185 cb->fill(cb, memcg_stat_strings[i].total_name, mystat.stat[i]); 4186 } 4187 4188 #ifdef CONFIG_DEBUG_VM 4189 cb->fill(cb, "inactive_ratio", calc_inactive_ratio(mem_cont, NULL)); 4190 4191 { 4192 int nid, zid; 4193 struct mem_cgroup_per_zone *mz; 4194 unsigned long recent_rotated[2] = {0, 0}; 4195 unsigned long recent_scanned[2] = {0, 0}; 4196 4197 for_each_online_node(nid) 4198 for (zid = 0; zid < MAX_NR_ZONES; zid++) { 4199 mz = mem_cgroup_zoneinfo(mem_cont, nid, zid); 4200 4201 recent_rotated[0] += 4202 mz->reclaim_stat.recent_rotated[0]; 4203 recent_rotated[1] += 4204 mz->reclaim_stat.recent_rotated[1]; 4205 recent_scanned[0] += 4206 mz->reclaim_stat.recent_scanned[0]; 4207 recent_scanned[1] += 4208 mz->reclaim_stat.recent_scanned[1]; 4209 } 4210 cb->fill(cb, "recent_rotated_anon", recent_rotated[0]); 4211 cb->fill(cb, "recent_rotated_file", recent_rotated[1]); 4212 cb->fill(cb, "recent_scanned_anon", recent_scanned[0]); 4213 cb->fill(cb, "recent_scanned_file", recent_scanned[1]); 4214 } 4215 #endif 4216 4217 return 0; 4218 } 4219 4220 static u64 mem_cgroup_swappiness_read(struct cgroup *cgrp, struct cftype *cft) 4221 { 4222 struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp); 4223 4224 return get_swappiness(memcg); 4225 } 4226 4227 static int mem_cgroup_swappiness_write(struct cgroup *cgrp, struct cftype *cft, 4228 u64 
val) 4229 { 4230 struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp); 4231 struct mem_cgroup *parent; 4232 4233 if (val > 100) 4234 return -EINVAL; 4235 4236 if (cgrp->parent == NULL) 4237 return -EINVAL; 4238 4239 parent = mem_cgroup_from_cont(cgrp->parent); 4240 4241 cgroup_lock(); 4242 4243 /* If under hierarchy, only empty-root can set this value */ 4244 if ((parent->use_hierarchy) || 4245 (memcg->use_hierarchy && !list_empty(&cgrp->children))) { 4246 cgroup_unlock(); 4247 return -EINVAL; 4248 } 4249 4250 memcg->swappiness = val; 4251 4252 cgroup_unlock(); 4253 4254 return 0; 4255 } 4256 4257 static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap) 4258 { 4259 struct mem_cgroup_threshold_ary *t; 4260 u64 usage; 4261 int i; 4262 4263 rcu_read_lock(); 4264 if (!swap) 4265 t = rcu_dereference(memcg->thresholds.primary); 4266 else 4267 t = rcu_dereference(memcg->memsw_thresholds.primary); 4268 4269 if (!t) 4270 goto unlock; 4271 4272 usage = mem_cgroup_usage(memcg, swap); 4273 4274 /* 4275 * current_threshold points to threshold just below usage. 4276 * If it's not true, a threshold was crossed after last 4277 * call of __mem_cgroup_threshold(). 4278 */ 4279 i = t->current_threshold; 4280 4281 /* 4282 * Iterate backward over array of thresholds starting from 4283 * current_threshold and check if a threshold is crossed. 4284 * If none of thresholds below usage is crossed, we read 4285 * only one element of the array here. 4286 */ 4287 for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--) 4288 eventfd_signal(t->entries[i].eventfd, 1); 4289 4290 /* i = current_threshold + 1 */ 4291 i++; 4292 4293 /* 4294 * Iterate forward over array of thresholds starting from 4295 * current_threshold+1 and check if a threshold is crossed. 4296 * If none of thresholds above usage is crossed, we read 4297 * only one element of the array here. 
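 *
 * Worked example (illustrative): with thresholds {4M, 8M, 16M} and
 * current_threshold pointing at 8M, a usage of 20M signals only the
 * 16M eventfd here and leaves current_threshold at 16M; a usage of 6M
 * makes the backward loop above signal the 8M eventfd and leaves
 * current_threshold at 4M.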
4298 */ 4299 for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++) 4300 eventfd_signal(t->entries[i].eventfd, 1); 4301 4302 /* Update current_threshold */ 4303 t->current_threshold = i - 1; 4304 unlock: 4305 rcu_read_unlock(); 4306 } 4307 4308 static void mem_cgroup_threshold(struct mem_cgroup *memcg) 4309 { 4310 while (memcg) { 4311 __mem_cgroup_threshold(memcg, false); 4312 if (do_swap_account) 4313 __mem_cgroup_threshold(memcg, true); 4314 4315 memcg = parent_mem_cgroup(memcg); 4316 } 4317 } 4318 4319 static int compare_thresholds(const void *a, const void *b) 4320 { 4321 const struct mem_cgroup_threshold *_a = a; 4322 const struct mem_cgroup_threshold *_b = b; 4323 4324 return _a->threshold - _b->threshold; 4325 } 4326 4327 static int mem_cgroup_oom_notify_cb(struct mem_cgroup *mem) 4328 { 4329 struct mem_cgroup_eventfd_list *ev; 4330 4331 list_for_each_entry(ev, &mem->oom_notify, list) 4332 eventfd_signal(ev->eventfd, 1); 4333 return 0; 4334 } 4335 4336 static void mem_cgroup_oom_notify(struct mem_cgroup *mem) 4337 { 4338 struct mem_cgroup *iter; 4339 4340 for_each_mem_cgroup_tree(iter, mem) 4341 mem_cgroup_oom_notify_cb(iter); 4342 } 4343 4344 static int mem_cgroup_usage_register_event(struct cgroup *cgrp, 4345 struct cftype *cft, struct eventfd_ctx *eventfd, const char *args) 4346 { 4347 struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp); 4348 struct mem_cgroup_thresholds *thresholds; 4349 struct mem_cgroup_threshold_ary *new; 4350 int type = MEMFILE_TYPE(cft->private); 4351 u64 threshold, usage; 4352 int i, size, ret; 4353 4354 ret = res_counter_memparse_write_strategy(args, &threshold); 4355 if (ret) 4356 return ret; 4357 4358 mutex_lock(&memcg->thresholds_lock); 4359 4360 if (type == _MEM) 4361 thresholds = &memcg->thresholds; 4362 else if (type == _MEMSWAP) 4363 thresholds = &memcg->memsw_thresholds; 4364 else 4365 BUG(); 4366 4367 usage = mem_cgroup_usage(memcg, type == _MEMSWAP); 4368 4369 /* Check if a threshold crossed before adding a new one */ 4370 if (thresholds->primary) 4371 __mem_cgroup_threshold(memcg, type == _MEMSWAP); 4372 4373 size = thresholds->primary ? thresholds->primary->size + 1 : 1; 4374 4375 /* Allocate memory for new array of thresholds */ 4376 new = kmalloc(sizeof(*new) + size * sizeof(struct mem_cgroup_threshold), 4377 GFP_KERNEL); 4378 if (!new) { 4379 ret = -ENOMEM; 4380 goto unlock; 4381 } 4382 new->size = size; 4383 4384 /* Copy thresholds (if any) to new array */ 4385 if (thresholds->primary) { 4386 memcpy(new->entries, thresholds->primary->entries, (size - 1) * 4387 sizeof(struct mem_cgroup_threshold)); 4388 } 4389 4390 /* Add new threshold */ 4391 new->entries[size - 1].eventfd = eventfd; 4392 new->entries[size - 1].threshold = threshold; 4393 4394 /* Sort thresholds. Registering of new threshold isn't time-critical */ 4395 sort(new->entries, size, sizeof(struct mem_cgroup_threshold), 4396 compare_thresholds, NULL); 4397 4398 /* Find current threshold */ 4399 new->current_threshold = -1; 4400 for (i = 0; i < size; i++) { 4401 if (new->entries[i].threshold < usage) { 4402 /* 4403 * new->current_threshold will not be used until 4404 * rcu_assign_pointer(), so it's safe to increment 4405 * it here. 
4406 */ 4407 ++new->current_threshold; 4408 } 4409 } 4410 4411 /* Free old spare buffer and save old primary buffer as spare */ 4412 kfree(thresholds->spare); 4413 thresholds->spare = thresholds->primary; 4414 4415 rcu_assign_pointer(thresholds->primary, new); 4416 4417 /* To be sure that nobody uses thresholds */ 4418 synchronize_rcu(); 4419 4420 unlock: 4421 mutex_unlock(&memcg->thresholds_lock); 4422 4423 return ret; 4424 } 4425 4426 static void mem_cgroup_usage_unregister_event(struct cgroup *cgrp, 4427 struct cftype *cft, struct eventfd_ctx *eventfd) 4428 { 4429 struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp); 4430 struct mem_cgroup_thresholds *thresholds; 4431 struct mem_cgroup_threshold_ary *new; 4432 int type = MEMFILE_TYPE(cft->private); 4433 u64 usage; 4434 int i, j, size; 4435 4436 mutex_lock(&memcg->thresholds_lock); 4437 if (type == _MEM) 4438 thresholds = &memcg->thresholds; 4439 else if (type == _MEMSWAP) 4440 thresholds = &memcg->memsw_thresholds; 4441 else 4442 BUG(); 4443 4444 /* 4445 * Something went wrong if we trying to unregister a threshold 4446 * if we don't have thresholds 4447 */ 4448 BUG_ON(!thresholds); 4449 4450 usage = mem_cgroup_usage(memcg, type == _MEMSWAP); 4451 4452 /* Check if a threshold crossed before removing */ 4453 __mem_cgroup_threshold(memcg, type == _MEMSWAP); 4454 4455 /* Calculate new number of threshold */ 4456 size = 0; 4457 for (i = 0; i < thresholds->primary->size; i++) { 4458 if (thresholds->primary->entries[i].eventfd != eventfd) 4459 size++; 4460 } 4461 4462 new = thresholds->spare; 4463 4464 /* Set thresholds array to NULL if we don't have thresholds */ 4465 if (!size) { 4466 kfree(new); 4467 new = NULL; 4468 goto swap_buffers; 4469 } 4470 4471 new->size = size; 4472 4473 /* Copy thresholds and find current threshold */ 4474 new->current_threshold = -1; 4475 for (i = 0, j = 0; i < thresholds->primary->size; i++) { 4476 if (thresholds->primary->entries[i].eventfd == eventfd) 4477 continue; 4478 4479 new->entries[j] = thresholds->primary->entries[i]; 4480 if (new->entries[j].threshold < usage) { 4481 /* 4482 * new->current_threshold will not be used 4483 * until rcu_assign_pointer(), so it's safe to increment 4484 * it here. 4485 */ 4486 ++new->current_threshold; 4487 } 4488 j++; 4489 } 4490 4491 swap_buffers: 4492 /* Swap primary and spare array */ 4493 thresholds->spare = thresholds->primary; 4494 rcu_assign_pointer(thresholds->primary, new); 4495 4496 /* To be sure that nobody uses thresholds */ 4497 synchronize_rcu(); 4498 4499 mutex_unlock(&memcg->thresholds_lock); 4500 } 4501 4502 static int mem_cgroup_oom_register_event(struct cgroup *cgrp, 4503 struct cftype *cft, struct eventfd_ctx *eventfd, const char *args) 4504 { 4505 struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp); 4506 struct mem_cgroup_eventfd_list *event; 4507 int type = MEMFILE_TYPE(cft->private); 4508 4509 BUG_ON(type != _OOM_TYPE); 4510 event = kmalloc(sizeof(*event), GFP_KERNEL); 4511 if (!event) 4512 return -ENOMEM; 4513 4514 mutex_lock(&memcg_oom_mutex); 4515 4516 event->eventfd = eventfd; 4517 list_add(&event->list, &memcg->oom_notify); 4518 4519 /* already in OOM ? 
*/ 4520 if (atomic_read(&memcg->oom_lock)) 4521 eventfd_signal(eventfd, 1); 4522 mutex_unlock(&memcg_oom_mutex); 4523 4524 return 0; 4525 } 4526 4527 static void mem_cgroup_oom_unregister_event(struct cgroup *cgrp, 4528 struct cftype *cft, struct eventfd_ctx *eventfd) 4529 { 4530 struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp); 4531 struct mem_cgroup_eventfd_list *ev, *tmp; 4532 int type = MEMFILE_TYPE(cft->private); 4533 4534 BUG_ON(type != _OOM_TYPE); 4535 4536 mutex_lock(&memcg_oom_mutex); 4537 4538 list_for_each_entry_safe(ev, tmp, &mem->oom_notify, list) { 4539 if (ev->eventfd == eventfd) { 4540 list_del(&ev->list); 4541 kfree(ev); 4542 } 4543 } 4544 4545 mutex_unlock(&memcg_oom_mutex); 4546 } 4547 4548 static int mem_cgroup_oom_control_read(struct cgroup *cgrp, 4549 struct cftype *cft, struct cgroup_map_cb *cb) 4550 { 4551 struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp); 4552 4553 cb->fill(cb, "oom_kill_disable", mem->oom_kill_disable); 4554 4555 if (atomic_read(&mem->oom_lock)) 4556 cb->fill(cb, "under_oom", 1); 4557 else 4558 cb->fill(cb, "under_oom", 0); 4559 return 0; 4560 } 4561 4562 static int mem_cgroup_oom_control_write(struct cgroup *cgrp, 4563 struct cftype *cft, u64 val) 4564 { 4565 struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp); 4566 struct mem_cgroup *parent; 4567 4568 /* cannot set to root cgroup and only 0 and 1 are allowed */ 4569 if (!cgrp->parent || !((val == 0) || (val == 1))) 4570 return -EINVAL; 4571 4572 parent = mem_cgroup_from_cont(cgrp->parent); 4573 4574 cgroup_lock(); 4575 /* oom-kill-disable is a flag for subhierarchy. */ 4576 if ((parent->use_hierarchy) || 4577 (mem->use_hierarchy && !list_empty(&cgrp->children))) { 4578 cgroup_unlock(); 4579 return -EINVAL; 4580 } 4581 mem->oom_kill_disable = val; 4582 if (!val) 4583 memcg_oom_recover(mem); 4584 cgroup_unlock(); 4585 return 0; 4586 } 4587 4588 #ifdef CONFIG_NUMA 4589 static const struct file_operations mem_control_numa_stat_file_operations = { 4590 .read = seq_read, 4591 .llseek = seq_lseek, 4592 .release = single_release, 4593 }; 4594 4595 static int mem_control_numa_stat_open(struct inode *unused, struct file *file) 4596 { 4597 struct cgroup *cont = file->f_dentry->d_parent->d_fsdata; 4598 4599 file->f_op = &mem_control_numa_stat_file_operations; 4600 return single_open(file, mem_control_numa_stat_show, cont); 4601 } 4602 #endif /* CONFIG_NUMA */ 4603 4604 static struct cftype mem_cgroup_files[] = { 4605 { 4606 .name = "usage_in_bytes", 4607 .private = MEMFILE_PRIVATE(_MEM, RES_USAGE), 4608 .read_u64 = mem_cgroup_read, 4609 .register_event = mem_cgroup_usage_register_event, 4610 .unregister_event = mem_cgroup_usage_unregister_event, 4611 }, 4612 { 4613 .name = "max_usage_in_bytes", 4614 .private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE), 4615 .trigger = mem_cgroup_reset, 4616 .read_u64 = mem_cgroup_read, 4617 }, 4618 { 4619 .name = "limit_in_bytes", 4620 .private = MEMFILE_PRIVATE(_MEM, RES_LIMIT), 4621 .write_string = mem_cgroup_write, 4622 .read_u64 = mem_cgroup_read, 4623 }, 4624 { 4625 .name = "soft_limit_in_bytes", 4626 .private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT), 4627 .write_string = mem_cgroup_write, 4628 .read_u64 = mem_cgroup_read, 4629 }, 4630 { 4631 .name = "failcnt", 4632 .private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT), 4633 .trigger = mem_cgroup_reset, 4634 .read_u64 = mem_cgroup_read, 4635 }, 4636 { 4637 .name = "stat", 4638 .read_map = mem_control_stat_show, 4639 }, 4640 { 4641 .name = "force_empty", 4642 .trigger = mem_cgroup_force_empty_write, 4643 }, 4644 { 4645 .name = 
"use_hierarchy", 4646 .write_u64 = mem_cgroup_hierarchy_write, 4647 .read_u64 = mem_cgroup_hierarchy_read, 4648 }, 4649 { 4650 .name = "swappiness", 4651 .read_u64 = mem_cgroup_swappiness_read, 4652 .write_u64 = mem_cgroup_swappiness_write, 4653 }, 4654 { 4655 .name = "move_charge_at_immigrate", 4656 .read_u64 = mem_cgroup_move_charge_read, 4657 .write_u64 = mem_cgroup_move_charge_write, 4658 }, 4659 { 4660 .name = "oom_control", 4661 .read_map = mem_cgroup_oom_control_read, 4662 .write_u64 = mem_cgroup_oom_control_write, 4663 .register_event = mem_cgroup_oom_register_event, 4664 .unregister_event = mem_cgroup_oom_unregister_event, 4665 .private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL), 4666 }, 4667 #ifdef CONFIG_NUMA 4668 { 4669 .name = "numa_stat", 4670 .open = mem_control_numa_stat_open, 4671 .mode = S_IRUGO, 4672 }, 4673 #endif 4674 }; 4675 4676 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP 4677 static struct cftype memsw_cgroup_files[] = { 4678 { 4679 .name = "memsw.usage_in_bytes", 4680 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE), 4681 .read_u64 = mem_cgroup_read, 4682 .register_event = mem_cgroup_usage_register_event, 4683 .unregister_event = mem_cgroup_usage_unregister_event, 4684 }, 4685 { 4686 .name = "memsw.max_usage_in_bytes", 4687 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE), 4688 .trigger = mem_cgroup_reset, 4689 .read_u64 = mem_cgroup_read, 4690 }, 4691 { 4692 .name = "memsw.limit_in_bytes", 4693 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT), 4694 .write_string = mem_cgroup_write, 4695 .read_u64 = mem_cgroup_read, 4696 }, 4697 { 4698 .name = "memsw.failcnt", 4699 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT), 4700 .trigger = mem_cgroup_reset, 4701 .read_u64 = mem_cgroup_read, 4702 }, 4703 }; 4704 4705 static int register_memsw_files(struct cgroup *cont, struct cgroup_subsys *ss) 4706 { 4707 if (!do_swap_account) 4708 return 0; 4709 return cgroup_add_files(cont, ss, memsw_cgroup_files, 4710 ARRAY_SIZE(memsw_cgroup_files)); 4711 }; 4712 #else 4713 static int register_memsw_files(struct cgroup *cont, struct cgroup_subsys *ss) 4714 { 4715 return 0; 4716 } 4717 #endif 4718 4719 static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node) 4720 { 4721 struct mem_cgroup_per_node *pn; 4722 struct mem_cgroup_per_zone *mz; 4723 enum lru_list l; 4724 int zone, tmp = node; 4725 /* 4726 * This routine is called against possible nodes. 4727 * But it's BUG to call kmalloc() against offline node. 4728 * 4729 * TODO: this routine can waste much memory for nodes which will 4730 * never be onlined. It's better to use memory hotplug callback 4731 * function. 
4732 */ 4733 if (!node_state(node, N_NORMAL_MEMORY)) 4734 tmp = -1; 4735 pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, tmp); 4736 if (!pn) 4737 return 1; 4738 4739 mem->info.nodeinfo[node] = pn; 4740 for (zone = 0; zone < MAX_NR_ZONES; zone++) { 4741 mz = &pn->zoneinfo[zone]; 4742 for_each_lru(l) 4743 INIT_LIST_HEAD(&mz->lists[l]); 4744 mz->usage_in_excess = 0; 4745 mz->on_tree = false; 4746 mz->mem = mem; 4747 } 4748 return 0; 4749 } 4750 4751 static void free_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node) 4752 { 4753 kfree(mem->info.nodeinfo[node]); 4754 } 4755 4756 static struct mem_cgroup *mem_cgroup_alloc(void) 4757 { 4758 struct mem_cgroup *mem; 4759 int size = sizeof(struct mem_cgroup); 4760 4761 /* Can be very big if MAX_NUMNODES is very big */ 4762 if (size < PAGE_SIZE) 4763 mem = kzalloc(size, GFP_KERNEL); 4764 else 4765 mem = vzalloc(size); 4766 4767 if (!mem) 4768 return NULL; 4769 4770 mem->stat = alloc_percpu(struct mem_cgroup_stat_cpu); 4771 if (!mem->stat) 4772 goto out_free; 4773 spin_lock_init(&mem->pcp_counter_lock); 4774 return mem; 4775 4776 out_free: 4777 if (size < PAGE_SIZE) 4778 kfree(mem); 4779 else 4780 vfree(mem); 4781 return NULL; 4782 } 4783 4784 /* 4785 * At destroying mem_cgroup, references from swap_cgroup can remain. 4786 * (scanning all at force_empty is too costly...) 4787 * 4788 * Instead of clearing all references at force_empty, we remember 4789 * the number of reference from swap_cgroup and free mem_cgroup when 4790 * it goes down to 0. 4791 * 4792 * Removal of cgroup itself succeeds regardless of refs from swap. 4793 */ 4794 4795 static void __mem_cgroup_free(struct mem_cgroup *mem) 4796 { 4797 int node; 4798 4799 mem_cgroup_remove_from_trees(mem); 4800 free_css_id(&mem_cgroup_subsys, &mem->css); 4801 4802 for_each_node_state(node, N_POSSIBLE) 4803 free_mem_cgroup_per_zone_info(mem, node); 4804 4805 free_percpu(mem->stat); 4806 if (sizeof(struct mem_cgroup) < PAGE_SIZE) 4807 kfree(mem); 4808 else 4809 vfree(mem); 4810 } 4811 4812 static void mem_cgroup_get(struct mem_cgroup *mem) 4813 { 4814 atomic_inc(&mem->refcnt); 4815 } 4816 4817 static void __mem_cgroup_put(struct mem_cgroup *mem, int count) 4818 { 4819 if (atomic_sub_and_test(count, &mem->refcnt)) { 4820 struct mem_cgroup *parent = parent_mem_cgroup(mem); 4821 __mem_cgroup_free(mem); 4822 if (parent) 4823 mem_cgroup_put(parent); 4824 } 4825 } 4826 4827 static void mem_cgroup_put(struct mem_cgroup *mem) 4828 { 4829 __mem_cgroup_put(mem, 1); 4830 } 4831 4832 /* 4833 * Returns the parent mem_cgroup in memcgroup hierarchy with hierarchy enabled. 
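 * Returns NULL for the root cgroup and for children created without
 * use_hierarchy, since res.parent is only initialized against the
 * parent's res_counter in the hierarchical case (see mem_cgroup_create).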
4834 */ 4835 static struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *mem) 4836 { 4837 if (!mem->res.parent) 4838 return NULL; 4839 return mem_cgroup_from_res_counter(mem->res.parent, res); 4840 } 4841 4842 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP 4843 static void __init enable_swap_cgroup(void) 4844 { 4845 if (!mem_cgroup_disabled() && really_do_swap_account) 4846 do_swap_account = 1; 4847 } 4848 #else 4849 static void __init enable_swap_cgroup(void) 4850 { 4851 } 4852 #endif 4853 4854 static int mem_cgroup_soft_limit_tree_init(void) 4855 { 4856 struct mem_cgroup_tree_per_node *rtpn; 4857 struct mem_cgroup_tree_per_zone *rtpz; 4858 int tmp, node, zone; 4859 4860 for_each_node_state(node, N_POSSIBLE) { 4861 tmp = node; 4862 if (!node_state(node, N_NORMAL_MEMORY)) 4863 tmp = -1; 4864 rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL, tmp); 4865 if (!rtpn) 4866 return 1; 4867 4868 soft_limit_tree.rb_tree_per_node[node] = rtpn; 4869 4870 for (zone = 0; zone < MAX_NR_ZONES; zone++) { 4871 rtpz = &rtpn->rb_tree_per_zone[zone]; 4872 rtpz->rb_root = RB_ROOT; 4873 spin_lock_init(&rtpz->lock); 4874 } 4875 } 4876 return 0; 4877 } 4878 4879 static struct cgroup_subsys_state * __ref 4880 mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont) 4881 { 4882 struct mem_cgroup *mem, *parent; 4883 long error = -ENOMEM; 4884 int node; 4885 4886 mem = mem_cgroup_alloc(); 4887 if (!mem) 4888 return ERR_PTR(error); 4889 4890 for_each_node_state(node, N_POSSIBLE) 4891 if (alloc_mem_cgroup_per_zone_info(mem, node)) 4892 goto free_out; 4893 4894 /* root ? */ 4895 if (cont->parent == NULL) { 4896 int cpu; 4897 enable_swap_cgroup(); 4898 parent = NULL; 4899 root_mem_cgroup = mem; 4900 if (mem_cgroup_soft_limit_tree_init()) 4901 goto free_out; 4902 for_each_possible_cpu(cpu) { 4903 struct memcg_stock_pcp *stock = 4904 &per_cpu(memcg_stock, cpu); 4905 INIT_WORK(&stock->work, drain_local_stock); 4906 } 4907 hotcpu_notifier(memcg_cpu_hotplug_callback, 0); 4908 } else { 4909 parent = mem_cgroup_from_cont(cont->parent); 4910 mem->use_hierarchy = parent->use_hierarchy; 4911 mem->oom_kill_disable = parent->oom_kill_disable; 4912 } 4913 4914 if (parent && parent->use_hierarchy) { 4915 res_counter_init(&mem->res, &parent->res); 4916 res_counter_init(&mem->memsw, &parent->memsw); 4917 /* 4918 * We increment refcnt of the parent to ensure that we can 4919 * safely access it on res_counter_charge/uncharge. 4920 * This refcnt will be decremented when freeing this 4921 * mem_cgroup(see mem_cgroup_put). 
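 * As a result a hierarchical parent cannot be freed before the last of
 * its children: __mem_cgroup_put() drops the parent's reference only
 * after __mem_cgroup_free() of the child.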
4922 */ 4923 mem_cgroup_get(parent); 4924 } else { 4925 res_counter_init(&mem->res, NULL); 4926 res_counter_init(&mem->memsw, NULL); 4927 } 4928 mem->last_scanned_child = 0; 4929 mem->last_scanned_node = MAX_NUMNODES; 4930 INIT_LIST_HEAD(&mem->oom_notify); 4931 4932 if (parent) 4933 mem->swappiness = get_swappiness(parent); 4934 atomic_set(&mem->refcnt, 1); 4935 mem->move_charge_at_immigrate = 0; 4936 mutex_init(&mem->thresholds_lock); 4937 return &mem->css; 4938 free_out: 4939 __mem_cgroup_free(mem); 4940 root_mem_cgroup = NULL; 4941 return ERR_PTR(error); 4942 } 4943 4944 static int mem_cgroup_pre_destroy(struct cgroup_subsys *ss, 4945 struct cgroup *cont) 4946 { 4947 struct mem_cgroup *mem = mem_cgroup_from_cont(cont); 4948 4949 return mem_cgroup_force_empty(mem, false); 4950 } 4951 4952 static void mem_cgroup_destroy(struct cgroup_subsys *ss, 4953 struct cgroup *cont) 4954 { 4955 struct mem_cgroup *mem = mem_cgroup_from_cont(cont); 4956 4957 mem_cgroup_put(mem); 4958 } 4959 4960 static int mem_cgroup_populate(struct cgroup_subsys *ss, 4961 struct cgroup *cont) 4962 { 4963 int ret; 4964 4965 ret = cgroup_add_files(cont, ss, mem_cgroup_files, 4966 ARRAY_SIZE(mem_cgroup_files)); 4967 4968 if (!ret) 4969 ret = register_memsw_files(cont, ss); 4970 return ret; 4971 } 4972 4973 #ifdef CONFIG_MMU 4974 /* Handlers for move charge at task migration. */ 4975 #define PRECHARGE_COUNT_AT_ONCE 256 4976 static int mem_cgroup_do_precharge(unsigned long count) 4977 { 4978 int ret = 0; 4979 int batch_count = PRECHARGE_COUNT_AT_ONCE; 4980 struct mem_cgroup *mem = mc.to; 4981 4982 if (mem_cgroup_is_root(mem)) { 4983 mc.precharge += count; 4984 /* we don't need css_get for root */ 4985 return ret; 4986 } 4987 /* try to charge at once */ 4988 if (count > 1) { 4989 struct res_counter *dummy; 4990 /* 4991 * "mem" cannot be under rmdir() because we've already checked 4992 * by cgroup_lock_live_cgroup() that it is not removed and we 4993 * are still under the same cgroup_mutex. So we can postpone 4994 * css_get(). 4995 */ 4996 if (res_counter_charge(&mem->res, PAGE_SIZE * count, &dummy)) 4997 goto one_by_one; 4998 if (do_swap_account && res_counter_charge(&mem->memsw, 4999 PAGE_SIZE * count, &dummy)) { 5000 res_counter_uncharge(&mem->res, PAGE_SIZE * count); 5001 goto one_by_one; 5002 } 5003 mc.precharge += count; 5004 return ret; 5005 } 5006 one_by_one: 5007 /* fall back to one by one charge */ 5008 while (count--) { 5009 if (signal_pending(current)) { 5010 ret = -EINTR; 5011 break; 5012 } 5013 if (!batch_count--) { 5014 batch_count = PRECHARGE_COUNT_AT_ONCE; 5015 cond_resched(); 5016 } 5017 ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, 1, &mem, false); 5018 if (ret || !mem) 5019 /* mem_cgroup_clear_mc() will do uncharge later */ 5020 return -ENOMEM; 5021 mc.precharge++; 5022 } 5023 return ret; 5024 } 5025 5026 /** 5027 * is_target_pte_for_mc - check a pte whether it is valid for move charge 5028 * @vma: the vma the pte to be checked belongs 5029 * @addr: the address corresponding to the pte to be checked 5030 * @ptent: the pte to be checked 5031 * @target: the pointer the target page or swap ent will be stored(can be NULL) 5032 * 5033 * Returns 5034 * 0(MC_TARGET_NONE): if the pte is not a target for move charge. 5035 * 1(MC_TARGET_PAGE): if the page corresponding to this pte is a target for 5036 * move charge. if @target is not NULL, the page is stored in target->page 5037 * with extra refcnt got(Callers should handle it). 
5038 * 2(MC_TARGET_SWAP): if the swap entry corresponding to this pte is a 5039 * target for charge migration. if @target is not NULL, the entry is stored 5040 * in target->ent. 5041 * 5042 * Called with pte lock held. 5043 */ 5044 union mc_target { 5045 struct page *page; 5046 swp_entry_t ent; 5047 }; 5048 5049 enum mc_target_type { 5050 MC_TARGET_NONE, /* not used */ 5051 MC_TARGET_PAGE, 5052 MC_TARGET_SWAP, 5053 }; 5054 5055 static struct page *mc_handle_present_pte(struct vm_area_struct *vma, 5056 unsigned long addr, pte_t ptent) 5057 { 5058 struct page *page = vm_normal_page(vma, addr, ptent); 5059 5060 if (!page || !page_mapped(page)) 5061 return NULL; 5062 if (PageAnon(page)) { 5063 /* we don't move shared anon */ 5064 if (!move_anon() || page_mapcount(page) > 2) 5065 return NULL; 5066 } else if (!move_file()) 5067 /* we ignore mapcount for file pages */ 5068 return NULL; 5069 if (!get_page_unless_zero(page)) 5070 return NULL; 5071 5072 return page; 5073 } 5074 5075 static struct page *mc_handle_swap_pte(struct vm_area_struct *vma, 5076 unsigned long addr, pte_t ptent, swp_entry_t *entry) 5077 { 5078 int usage_count; 5079 struct page *page = NULL; 5080 swp_entry_t ent = pte_to_swp_entry(ptent); 5081 5082 if (!move_anon() || non_swap_entry(ent)) 5083 return NULL; 5084 usage_count = mem_cgroup_count_swap_user(ent, &page); 5085 if (usage_count > 1) { /* we don't move shared anon */ 5086 if (page) 5087 put_page(page); 5088 return NULL; 5089 } 5090 if (do_swap_account) 5091 entry->val = ent.val; 5092 5093 return page; 5094 } 5095 5096 static struct page *mc_handle_file_pte(struct vm_area_struct *vma, 5097 unsigned long addr, pte_t ptent, swp_entry_t *entry) 5098 { 5099 struct page *page = NULL; 5100 struct inode *inode; 5101 struct address_space *mapping; 5102 pgoff_t pgoff; 5103 5104 if (!vma->vm_file) /* anonymous vma */ 5105 return NULL; 5106 if (!move_file()) 5107 return NULL; 5108 5109 inode = vma->vm_file->f_path.dentry->d_inode; 5110 mapping = vma->vm_file->f_mapping; 5111 if (pte_none(ptent)) 5112 pgoff = linear_page_index(vma, addr); 5113 else /* pte_file(ptent) is true */ 5114 pgoff = pte_to_pgoff(ptent); 5115 5116 /* page is moved even if it's not RSS of this task(page-faulted). */ 5117 if (!mapping_cap_swap_backed(mapping)) { /* normal file */ 5118 page = find_get_page(mapping, pgoff); 5119 } else { /* shmem/tmpfs file. we should take account of swap too. */ 5120 swp_entry_t ent; 5121 mem_cgroup_get_shmem_target(inode, pgoff, &page, &ent); 5122 if (do_swap_account) 5123 entry->val = ent.val; 5124 } 5125 5126 return page; 5127 } 5128 5129 static int is_target_pte_for_mc(struct vm_area_struct *vma, 5130 unsigned long addr, pte_t ptent, union mc_target *target) 5131 { 5132 struct page *page = NULL; 5133 struct page_cgroup *pc; 5134 int ret = 0; 5135 swp_entry_t ent = { .val = 0 }; 5136 5137 if (pte_present(ptent)) 5138 page = mc_handle_present_pte(vma, addr, ptent); 5139 else if (is_swap_pte(ptent)) 5140 page = mc_handle_swap_pte(vma, addr, ptent, &ent); 5141 else if (pte_none(ptent) || pte_file(ptent)) 5142 page = mc_handle_file_pte(vma, addr, ptent, &ent); 5143 5144 if (!page && !ent.val) 5145 return 0; 5146 if (page) { 5147 pc = lookup_page_cgroup(page); 5148 /* 5149 * Do only loose check w/o page_cgroup lock. 5150 * mem_cgroup_move_account() checks the pc is valid or not under 5151 * the lock. 
5152 */ 5153 if (PageCgroupUsed(pc) && pc->mem_cgroup == mc.from) { 5154 ret = MC_TARGET_PAGE; 5155 if (target) 5156 target->page = page; 5157 } 5158 if (!ret || !target) 5159 put_page(page); 5160 } 5161 /* There is a swap entry and a page doesn't exist or isn't charged */ 5162 if (ent.val && !ret && 5163 css_id(&mc.from->css) == lookup_swap_cgroup(ent)) { 5164 ret = MC_TARGET_SWAP; 5165 if (target) 5166 target->ent = ent; 5167 } 5168 return ret; 5169 } 5170 5171 static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd, 5172 unsigned long addr, unsigned long end, 5173 struct mm_walk *walk) 5174 { 5175 struct vm_area_struct *vma = walk->private; 5176 pte_t *pte; 5177 spinlock_t *ptl; 5178 5179 split_huge_page_pmd(walk->mm, pmd); 5180 5181 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); 5182 for (; addr != end; pte++, addr += PAGE_SIZE) 5183 if (is_target_pte_for_mc(vma, addr, *pte, NULL)) 5184 mc.precharge++; /* increment precharge temporarily */ 5185 pte_unmap_unlock(pte - 1, ptl); 5186 cond_resched(); 5187 5188 return 0; 5189 } 5190 5191 static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm) 5192 { 5193 unsigned long precharge; 5194 struct vm_area_struct *vma; 5195 5196 down_read(&mm->mmap_sem); 5197 for (vma = mm->mmap; vma; vma = vma->vm_next) { 5198 struct mm_walk mem_cgroup_count_precharge_walk = { 5199 .pmd_entry = mem_cgroup_count_precharge_pte_range, 5200 .mm = mm, 5201 .private = vma, 5202 }; 5203 if (is_vm_hugetlb_page(vma)) 5204 continue; 5205 walk_page_range(vma->vm_start, vma->vm_end, 5206 &mem_cgroup_count_precharge_walk); 5207 } 5208 up_read(&mm->mmap_sem); 5209 5210 precharge = mc.precharge; 5211 mc.precharge = 0; 5212 5213 return precharge; 5214 } 5215 5216 static int mem_cgroup_precharge_mc(struct mm_struct *mm) 5217 { 5218 unsigned long precharge = mem_cgroup_count_precharge(mm); 5219 5220 VM_BUG_ON(mc.moving_task); 5221 mc.moving_task = current; 5222 return mem_cgroup_do_precharge(precharge); 5223 } 5224 5225 /* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */ 5226 static void __mem_cgroup_clear_mc(void) 5227 { 5228 struct mem_cgroup *from = mc.from; 5229 struct mem_cgroup *to = mc.to; 5230 5231 /* we must uncharge all the leftover precharges from mc.to */ 5232 if (mc.precharge) { 5233 __mem_cgroup_cancel_charge(mc.to, mc.precharge); 5234 mc.precharge = 0; 5235 } 5236 /* 5237 * we didn't uncharge from mc.from at mem_cgroup_move_account(), so 5238 * we must uncharge here. 5239 */ 5240 if (mc.moved_charge) { 5241 __mem_cgroup_cancel_charge(mc.from, mc.moved_charge); 5242 mc.moved_charge = 0; 5243 } 5244 /* we must fixup refcnts and charges */ 5245 if (mc.moved_swap) { 5246 /* uncharge swap account from the old cgroup */ 5247 if (!mem_cgroup_is_root(mc.from)) 5248 res_counter_uncharge(&mc.from->memsw, 5249 PAGE_SIZE * mc.moved_swap); 5250 __mem_cgroup_put(mc.from, mc.moved_swap); 5251 5252 if (!mem_cgroup_is_root(mc.to)) { 5253 /* 5254 * we charged both to->res and to->memsw, so we should 5255 * uncharge to->res. 5256 */ 5257 res_counter_uncharge(&mc.to->res, 5258 PAGE_SIZE * mc.moved_swap); 5259 } 5260 /* we've already done mem_cgroup_get(mc.to) */ 5261 mc.moved_swap = 0; 5262 } 5263 memcg_oom_recover(from); 5264 memcg_oom_recover(to); 5265 wake_up_all(&mc.waitq); 5266 } 5267 5268 static void mem_cgroup_clear_mc(void) 5269 { 5270 struct mem_cgroup *from = mc.from; 5271 5272 /* 5273 * we must clear moving_task before waking up waiters at the end of 5274 * task migration. 
5275 */ 5276 mc.moving_task = NULL; 5277 __mem_cgroup_clear_mc(); 5278 spin_lock(&mc.lock); 5279 mc.from = NULL; 5280 mc.to = NULL; 5281 spin_unlock(&mc.lock); 5282 mem_cgroup_end_move(from); 5283 } 5284 5285 static int mem_cgroup_can_attach(struct cgroup_subsys *ss, 5286 struct cgroup *cgroup, 5287 struct task_struct *p) 5288 { 5289 int ret = 0; 5290 struct mem_cgroup *mem = mem_cgroup_from_cont(cgroup); 5291 5292 if (mem->move_charge_at_immigrate) { 5293 struct mm_struct *mm; 5294 struct mem_cgroup *from = mem_cgroup_from_task(p); 5295 5296 VM_BUG_ON(from == mem); 5297 5298 mm = get_task_mm(p); 5299 if (!mm) 5300 return 0; 5301 /* We move charges only when we move a owner of the mm */ 5302 if (mm->owner == p) { 5303 VM_BUG_ON(mc.from); 5304 VM_BUG_ON(mc.to); 5305 VM_BUG_ON(mc.precharge); 5306 VM_BUG_ON(mc.moved_charge); 5307 VM_BUG_ON(mc.moved_swap); 5308 mem_cgroup_start_move(from); 5309 spin_lock(&mc.lock); 5310 mc.from = from; 5311 mc.to = mem; 5312 spin_unlock(&mc.lock); 5313 /* We set mc.moving_task later */ 5314 5315 ret = mem_cgroup_precharge_mc(mm); 5316 if (ret) 5317 mem_cgroup_clear_mc(); 5318 } 5319 mmput(mm); 5320 } 5321 return ret; 5322 } 5323 5324 static void mem_cgroup_cancel_attach(struct cgroup_subsys *ss, 5325 struct cgroup *cgroup, 5326 struct task_struct *p) 5327 { 5328 mem_cgroup_clear_mc(); 5329 } 5330 5331 static int mem_cgroup_move_charge_pte_range(pmd_t *pmd, 5332 unsigned long addr, unsigned long end, 5333 struct mm_walk *walk) 5334 { 5335 int ret = 0; 5336 struct vm_area_struct *vma = walk->private; 5337 pte_t *pte; 5338 spinlock_t *ptl; 5339 5340 split_huge_page_pmd(walk->mm, pmd); 5341 retry: 5342 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); 5343 for (; addr != end; addr += PAGE_SIZE) { 5344 pte_t ptent = *(pte++); 5345 union mc_target target; 5346 int type; 5347 struct page *page; 5348 struct page_cgroup *pc; 5349 swp_entry_t ent; 5350 5351 if (!mc.precharge) 5352 break; 5353 5354 type = is_target_pte_for_mc(vma, addr, ptent, &target); 5355 switch (type) { 5356 case MC_TARGET_PAGE: 5357 page = target.page; 5358 if (isolate_lru_page(page)) 5359 goto put; 5360 pc = lookup_page_cgroup(page); 5361 if (!mem_cgroup_move_account(page, 1, pc, 5362 mc.from, mc.to, false)) { 5363 mc.precharge--; 5364 /* we uncharge from mc.from later. */ 5365 mc.moved_charge++; 5366 } 5367 putback_lru_page(page); 5368 put: /* is_target_pte_for_mc() gets the page */ 5369 put_page(page); 5370 break; 5371 case MC_TARGET_SWAP: 5372 ent = target.ent; 5373 if (!mem_cgroup_move_swap_account(ent, 5374 mc.from, mc.to, false)) { 5375 mc.precharge--; 5376 /* we fixup refcnts and charges later. */ 5377 mc.moved_swap++; 5378 } 5379 break; 5380 default: 5381 break; 5382 } 5383 } 5384 pte_unmap_unlock(pte - 1, ptl); 5385 cond_resched(); 5386 5387 if (addr != end) { 5388 /* 5389 * We have consumed all precharges we got in can_attach(). 5390 * We try charge one by one, but don't do any additional 5391 * charges to mc.to if we have failed in charge once in attach() 5392 * phase. 5393 */ 5394 ret = mem_cgroup_do_precharge(1); 5395 if (!ret) 5396 goto retry; 5397 } 5398 5399 return ret; 5400 } 5401 5402 static void mem_cgroup_move_charge(struct mm_struct *mm) 5403 { 5404 struct vm_area_struct *vma; 5405 5406 lru_add_drain_all(); 5407 retry: 5408 if (unlikely(!down_read_trylock(&mm->mmap_sem))) { 5409 /* 5410 * Someone who are holding the mmap_sem might be waiting in 5411 * waitq. So we cancel all extra charges, wake up all waiters, 5412 * and retry. 
Because we cancel precharges, we might not be able 5413 * to move enough charges, but moving charge is a best-effort 5414 * feature anyway, so it wouldn't be a big problem. 5415 */ 5416 __mem_cgroup_clear_mc(); 5417 cond_resched(); 5418 goto retry; 5419 } 5420 for (vma = mm->mmap; vma; vma = vma->vm_next) { 5421 int ret; 5422 struct mm_walk mem_cgroup_move_charge_walk = { 5423 .pmd_entry = mem_cgroup_move_charge_pte_range, 5424 .mm = mm, 5425 .private = vma, 5426 }; 5427 if (is_vm_hugetlb_page(vma)) 5428 continue; 5429 ret = walk_page_range(vma->vm_start, vma->vm_end, 5430 &mem_cgroup_move_charge_walk); 5431 if (ret) 5432 /* 5433 * means we have consumed all precharges and failed in 5434 * doing additional charge. Just abandon here. 5435 */ 5436 break; 5437 } 5438 up_read(&mm->mmap_sem); 5439 } 5440 5441 static void mem_cgroup_move_task(struct cgroup_subsys *ss, 5442 struct cgroup *cont, 5443 struct cgroup *old_cont, 5444 struct task_struct *p) 5445 { 5446 struct mm_struct *mm = get_task_mm(p); 5447 5448 if (mm) { 5449 if (mc.to) 5450 mem_cgroup_move_charge(mm); 5451 put_swap_token(mm); 5452 mmput(mm); 5453 } 5454 if (mc.to) 5455 mem_cgroup_clear_mc(); 5456 } 5457 #else /* !CONFIG_MMU */ 5458 static int mem_cgroup_can_attach(struct cgroup_subsys *ss, 5459 struct cgroup *cgroup, 5460 struct task_struct *p) 5461 { 5462 return 0; 5463 } 5464 static void mem_cgroup_cancel_attach(struct cgroup_subsys *ss, 5465 struct cgroup *cgroup, 5466 struct task_struct *p) 5467 { 5468 } 5469 static void mem_cgroup_move_task(struct cgroup_subsys *ss, 5470 struct cgroup *cont, 5471 struct cgroup *old_cont, 5472 struct task_struct *p) 5473 { 5474 } 5475 #endif 5476 5477 struct cgroup_subsys mem_cgroup_subsys = { 5478 .name = "memory", 5479 .subsys_id = mem_cgroup_subsys_id, 5480 .create = mem_cgroup_create, 5481 .pre_destroy = mem_cgroup_pre_destroy, 5482 .destroy = mem_cgroup_destroy, 5483 .populate = mem_cgroup_populate, 5484 .can_attach = mem_cgroup_can_attach, 5485 .cancel_attach = mem_cgroup_cancel_attach, 5486 .attach = mem_cgroup_move_task, 5487 .early_init = 0, 5488 .use_id = 1, 5489 }; 5490 5491 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP 5492 static int __init enable_swap_account(char *s) 5493 { 5494 /* consider enabled if no parameter or 1 is given */ 5495 if (!strcmp(s, "1")) 5496 really_do_swap_account = 1; 5497 else if (!strcmp(s, "0")) 5498 really_do_swap_account = 0; 5499 return 1; 5500 } 5501 __setup("swapaccount=", enable_swap_account); 5502 5503 #endif 5504
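/*
 * Illustrative userspace sketch (not part of this file): registering a
 * memory usage threshold against the eventfd interface implemented by
 * mem_cgroup_usage_register_event() above.  The mount point, group name
 * and the 100M threshold are assumptions for the example; the snippet
 * needs <sys/eventfd.h>, <fcntl.h>, <stdio.h>, <string.h>, <stdint.h>
 * and <unistd.h>.
 *
 *	int efd = eventfd(0, 0);
 *	int ufd = open("/cgroup/memory/grp/memory.usage_in_bytes", O_RDONLY);
 *	int cfd = open("/cgroup/memory/grp/cgroup.event_control", O_WRONLY);
 *	char buf[64];
 *	uint64_t ticks;
 *
 *	snprintf(buf, sizeof(buf), "%d %d 100M", efd, ufd);
 *	write(cfd, buf, strlen(buf));         <- arm the threshold
 *	read(efd, &ticks, sizeof(ticks));     <- blocks until it is crossed
 *
 * Writing "<event_fd> <fd of memory.oom_control>" to cgroup.event_control
 * in the same way registers an OOM notification, which is delivered via
 * mem_cgroup_oom_register_event()/mem_cgroup_oom_notify() above.
 */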