xref: /linux/mm/memcontrol.c (revision ce00aa0a72ebc10270aa093a866f3c181623fd58)
1 /* memcontrol.c - Memory Controller
2  *
3  * Copyright IBM Corporation, 2007
4  * Author Balbir Singh <balbir@linux.vnet.ibm.com>
5  *
6  * Copyright 2007 OpenVZ SWsoft Inc
7  * Author: Pavel Emelianov <xemul@openvz.org>
8  *
9  * Memory thresholds
10  * Copyright (C) 2009 Nokia Corporation
11  * Author: Kirill A. Shutemov
12  *
13  * Kernel Memory Controller
14  * Copyright (C) 2012 Parallels Inc. and Google Inc.
15  * Authors: Glauber Costa and Suleiman Souhlal
16  *
17  * This program is free software; you can redistribute it and/or modify
18  * it under the terms of the GNU General Public License as published by
19  * the Free Software Foundation; either version 2 of the License, or
20  * (at your option) any later version.
21  *
22  * This program is distributed in the hope that it will be useful,
23  * but WITHOUT ANY WARRANTY; without even the implied warranty of
24  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
25  * GNU General Public License for more details.
26  */
27 
28 #include <linux/res_counter.h>
29 #include <linux/memcontrol.h>
30 #include <linux/cgroup.h>
31 #include <linux/mm.h>
32 #include <linux/hugetlb.h>
33 #include <linux/pagemap.h>
34 #include <linux/smp.h>
35 #include <linux/page-flags.h>
36 #include <linux/backing-dev.h>
37 #include <linux/bit_spinlock.h>
38 #include <linux/rcupdate.h>
39 #include <linux/limits.h>
40 #include <linux/export.h>
41 #include <linux/mutex.h>
42 #include <linux/rbtree.h>
43 #include <linux/slab.h>
44 #include <linux/swap.h>
45 #include <linux/swapops.h>
46 #include <linux/spinlock.h>
47 #include <linux/eventfd.h>
48 #include <linux/poll.h>
49 #include <linux/sort.h>
50 #include <linux/fs.h>
51 #include <linux/seq_file.h>
52 #include <linux/vmpressure.h>
53 #include <linux/mm_inline.h>
54 #include <linux/page_cgroup.h>
55 #include <linux/cpu.h>
56 #include <linux/oom.h>
57 #include <linux/lockdep.h>
58 #include <linux/file.h>
59 #include "internal.h"
60 #include <net/sock.h>
61 #include <net/ip.h>
62 #include <net/tcp_memcontrol.h>
63 #include "slab.h"
64 
65 #include <asm/uaccess.h>
66 
67 #include <trace/events/vmscan.h>
68 
69 struct cgroup_subsys memory_cgrp_subsys __read_mostly;
70 EXPORT_SYMBOL(memory_cgrp_subsys);
71 
72 #define MEM_CGROUP_RECLAIM_RETRIES	5
73 static struct mem_cgroup *root_mem_cgroup __read_mostly;
74 
75 #ifdef CONFIG_MEMCG_SWAP
76 /* Turned on only when memory cgroup is enabled && really_do_swap_account = 1 */
77 int do_swap_account __read_mostly;
78 
79 /* for remember boot option*/
80 #ifdef CONFIG_MEMCG_SWAP_ENABLED
81 static int really_do_swap_account __initdata = 1;
82 #else
83 static int really_do_swap_account __initdata = 0;
84 #endif
85 
86 #else
87 #define do_swap_account		0
88 #endif
89 
90 
91 static const char * const mem_cgroup_stat_names[] = {
92 	"cache",
93 	"rss",
94 	"rss_huge",
95 	"mapped_file",
96 	"writeback",
97 	"swap",
98 };
99 
100 enum mem_cgroup_events_index {
101 	MEM_CGROUP_EVENTS_PGPGIN,	/* # of pages paged in */
102 	MEM_CGROUP_EVENTS_PGPGOUT,	/* # of pages paged out */
103 	MEM_CGROUP_EVENTS_PGFAULT,	/* # of page-faults */
104 	MEM_CGROUP_EVENTS_PGMAJFAULT,	/* # of major page-faults */
105 	MEM_CGROUP_EVENTS_NSTATS,
106 };
107 
108 static const char * const mem_cgroup_events_names[] = {
109 	"pgpgin",
110 	"pgpgout",
111 	"pgfault",
112 	"pgmajfault",
113 };
114 
115 static const char * const mem_cgroup_lru_names[] = {
116 	"inactive_anon",
117 	"active_anon",
118 	"inactive_file",
119 	"active_file",
120 	"unevictable",
121 };
122 
123 /*
124  * Per memcg event counter is incremented at every pagein/pageout. With THP,
125  * it will be incremated by the number of pages. This counter is used for
126  * for trigger some periodic events. This is straightforward and better
127  * than using jiffies etc. to handle periodic memcg event.
128  */
129 enum mem_cgroup_events_target {
130 	MEM_CGROUP_TARGET_THRESH,
131 	MEM_CGROUP_TARGET_SOFTLIMIT,
132 	MEM_CGROUP_TARGET_NUMAINFO,
133 	MEM_CGROUP_NTARGETS,
134 };
135 #define THRESHOLDS_EVENTS_TARGET 128
136 #define SOFTLIMIT_EVENTS_TARGET 1024
137 #define NUMAINFO_EVENTS_TARGET	1024
138 
139 struct mem_cgroup_stat_cpu {
140 	long count[MEM_CGROUP_STAT_NSTATS];
141 	unsigned long events[MEM_CGROUP_EVENTS_NSTATS];
142 	unsigned long nr_page_events;
143 	unsigned long targets[MEM_CGROUP_NTARGETS];
144 };
145 
146 struct mem_cgroup_reclaim_iter {
147 	/*
148 	 * last scanned hierarchy member. Valid only if last_dead_count
149 	 * matches memcg->dead_count of the hierarchy root group.
150 	 */
151 	struct mem_cgroup *last_visited;
152 	int last_dead_count;
153 
154 	/* scan generation, increased every round-trip */
155 	unsigned int generation;
156 };
157 
158 /*
159  * per-zone information in memory controller.
160  */
161 struct mem_cgroup_per_zone {
162 	struct lruvec		lruvec;
163 	unsigned long		lru_size[NR_LRU_LISTS];
164 
165 	struct mem_cgroup_reclaim_iter reclaim_iter[DEF_PRIORITY + 1];
166 
167 	struct rb_node		tree_node;	/* RB tree node */
168 	unsigned long long	usage_in_excess;/* Set to the value by which */
169 						/* the soft limit is exceeded*/
170 	bool			on_tree;
171 	struct mem_cgroup	*memcg;		/* Back pointer, we cannot */
172 						/* use container_of	   */
173 };
174 
175 struct mem_cgroup_per_node {
176 	struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES];
177 };
178 
179 /*
180  * Cgroups above their limits are maintained in a RB-Tree, independent of
181  * their hierarchy representation
182  */
183 
184 struct mem_cgroup_tree_per_zone {
185 	struct rb_root rb_root;
186 	spinlock_t lock;
187 };
188 
189 struct mem_cgroup_tree_per_node {
190 	struct mem_cgroup_tree_per_zone rb_tree_per_zone[MAX_NR_ZONES];
191 };
192 
193 struct mem_cgroup_tree {
194 	struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
195 };
196 
197 static struct mem_cgroup_tree soft_limit_tree __read_mostly;
198 
199 struct mem_cgroup_threshold {
200 	struct eventfd_ctx *eventfd;
201 	u64 threshold;
202 };
203 
204 /* For threshold */
205 struct mem_cgroup_threshold_ary {
206 	/* An array index points to threshold just below or equal to usage. */
207 	int current_threshold;
208 	/* Size of entries[] */
209 	unsigned int size;
210 	/* Array of thresholds */
211 	struct mem_cgroup_threshold entries[0];
212 };
213 
214 struct mem_cgroup_thresholds {
215 	/* Primary thresholds array */
216 	struct mem_cgroup_threshold_ary *primary;
217 	/*
218 	 * Spare threshold array.
219 	 * This is needed to make mem_cgroup_unregister_event() "never fail".
220 	 * It must be able to store at least primary->size - 1 entries.
221 	 */
222 	struct mem_cgroup_threshold_ary *spare;
223 };
224 
225 /* for OOM */
226 struct mem_cgroup_eventfd_list {
227 	struct list_head list;
228 	struct eventfd_ctx *eventfd;
229 };
230 
231 /*
232  * cgroup_event represents events which userspace want to receive.
233  */
234 struct mem_cgroup_event {
235 	/*
236 	 * memcg which the event belongs to.
237 	 */
238 	struct mem_cgroup *memcg;
239 	/*
240 	 * eventfd to signal userspace about the event.
241 	 */
242 	struct eventfd_ctx *eventfd;
243 	/*
244 	 * Each of these stored in a list by the cgroup.
245 	 */
246 	struct list_head list;
247 	/*
248 	 * register_event() callback will be used to add new userspace
249 	 * waiter for changes related to this event.  Use eventfd_signal()
250 	 * on eventfd to send notification to userspace.
251 	 */
252 	int (*register_event)(struct mem_cgroup *memcg,
253 			      struct eventfd_ctx *eventfd, const char *args);
254 	/*
255 	 * unregister_event() callback will be called when userspace closes
256 	 * the eventfd or on cgroup removing.  This callback must be set,
257 	 * if you want provide notification functionality.
258 	 */
259 	void (*unregister_event)(struct mem_cgroup *memcg,
260 				 struct eventfd_ctx *eventfd);
261 	/*
262 	 * All fields below needed to unregister event when
263 	 * userspace closes eventfd.
264 	 */
265 	poll_table pt;
266 	wait_queue_head_t *wqh;
267 	wait_queue_t wait;
268 	struct work_struct remove;
269 };
270 
271 static void mem_cgroup_threshold(struct mem_cgroup *memcg);
272 static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);
273 
274 /*
275  * The memory controller data structure. The memory controller controls both
276  * page cache and RSS per cgroup. We would eventually like to provide
277  * statistics based on the statistics developed by Rik Van Riel for clock-pro,
278  * to help the administrator determine what knobs to tune.
279  *
280  * TODO: Add a water mark for the memory controller. Reclaim will begin when
281  * we hit the water mark. May be even add a low water mark, such that
282  * no reclaim occurs from a cgroup at it's low water mark, this is
283  * a feature that will be implemented much later in the future.
284  */
285 struct mem_cgroup {
286 	struct cgroup_subsys_state css;
287 	/*
288 	 * the counter to account for memory usage
289 	 */
290 	struct res_counter res;
291 
292 	/* vmpressure notifications */
293 	struct vmpressure vmpressure;
294 
295 	/*
296 	 * the counter to account for mem+swap usage.
297 	 */
298 	struct res_counter memsw;
299 
300 	/*
301 	 * the counter to account for kernel memory usage.
302 	 */
303 	struct res_counter kmem;
304 	/*
305 	 * Should the accounting and control be hierarchical, per subtree?
306 	 */
307 	bool use_hierarchy;
308 	unsigned long kmem_account_flags; /* See KMEM_ACCOUNTED_*, below */
309 
310 	bool		oom_lock;
311 	atomic_t	under_oom;
312 	atomic_t	oom_wakeups;
313 
314 	int	swappiness;
315 	/* OOM-Killer disable */
316 	int		oom_kill_disable;
317 
318 	/* set when res.limit == memsw.limit */
319 	bool		memsw_is_minimum;
320 
321 	/* protect arrays of thresholds */
322 	struct mutex thresholds_lock;
323 
324 	/* thresholds for memory usage. RCU-protected */
325 	struct mem_cgroup_thresholds thresholds;
326 
327 	/* thresholds for mem+swap usage. RCU-protected */
328 	struct mem_cgroup_thresholds memsw_thresholds;
329 
330 	/* For oom notifier event fd */
331 	struct list_head oom_notify;
332 
333 	/*
334 	 * Should we move charges of a task when a task is moved into this
335 	 * mem_cgroup ? And what type of charges should we move ?
336 	 */
337 	unsigned long move_charge_at_immigrate;
338 	/*
339 	 * set > 0 if pages under this cgroup are moving to other cgroup.
340 	 */
341 	atomic_t	moving_account;
342 	/* taken only while moving_account > 0 */
343 	spinlock_t	move_lock;
344 	/*
345 	 * percpu counter.
346 	 */
347 	struct mem_cgroup_stat_cpu __percpu *stat;
348 	/*
349 	 * used when a cpu is offlined or other synchronizations
350 	 * See mem_cgroup_read_stat().
351 	 */
352 	struct mem_cgroup_stat_cpu nocpu_base;
353 	spinlock_t pcp_counter_lock;
354 
355 	atomic_t	dead_count;
356 #if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_INET)
357 	struct cg_proto tcp_mem;
358 #endif
359 #if defined(CONFIG_MEMCG_KMEM)
360 	/* analogous to slab_common's slab_caches list. per-memcg */
361 	struct list_head memcg_slab_caches;
362 	/* Not a spinlock, we can take a lot of time walking the list */
363 	struct mutex slab_caches_mutex;
364         /* Index in the kmem_cache->memcg_params->memcg_caches array */
365 	int kmemcg_id;
366 #endif
367 
368 	int last_scanned_node;
369 #if MAX_NUMNODES > 1
370 	nodemask_t	scan_nodes;
371 	atomic_t	numainfo_events;
372 	atomic_t	numainfo_updating;
373 #endif
374 
375 	/* List of events which userspace want to receive */
376 	struct list_head event_list;
377 	spinlock_t event_list_lock;
378 
379 	struct mem_cgroup_per_node *nodeinfo[0];
380 	/* WARNING: nodeinfo must be the last member here */
381 };
382 
383 /* internal only representation about the status of kmem accounting. */
384 enum {
385 	KMEM_ACCOUNTED_ACTIVE, /* accounted by this cgroup itself */
386 	KMEM_ACCOUNTED_DEAD, /* dead memcg with pending kmem charges */
387 };
388 
389 #ifdef CONFIG_MEMCG_KMEM
390 static inline void memcg_kmem_set_active(struct mem_cgroup *memcg)
391 {
392 	set_bit(KMEM_ACCOUNTED_ACTIVE, &memcg->kmem_account_flags);
393 }
394 
395 static bool memcg_kmem_is_active(struct mem_cgroup *memcg)
396 {
397 	return test_bit(KMEM_ACCOUNTED_ACTIVE, &memcg->kmem_account_flags);
398 }
399 
400 static void memcg_kmem_mark_dead(struct mem_cgroup *memcg)
401 {
402 	/*
403 	 * Our caller must use css_get() first, because memcg_uncharge_kmem()
404 	 * will call css_put() if it sees the memcg is dead.
405 	 */
406 	smp_wmb();
407 	if (test_bit(KMEM_ACCOUNTED_ACTIVE, &memcg->kmem_account_flags))
408 		set_bit(KMEM_ACCOUNTED_DEAD, &memcg->kmem_account_flags);
409 }
410 
411 static bool memcg_kmem_test_and_clear_dead(struct mem_cgroup *memcg)
412 {
413 	return test_and_clear_bit(KMEM_ACCOUNTED_DEAD,
414 				  &memcg->kmem_account_flags);
415 }
416 #endif
417 
418 /* Stuffs for move charges at task migration. */
419 /*
420  * Types of charges to be moved. "move_charge_at_immitgrate" and
421  * "immigrate_flags" are treated as a left-shifted bitmap of these types.
422  */
423 enum move_type {
424 	MOVE_CHARGE_TYPE_ANON,	/* private anonymous page and swap of it */
425 	MOVE_CHARGE_TYPE_FILE,	/* file page(including tmpfs) and swap of it */
426 	NR_MOVE_TYPE,
427 };
428 
429 /* "mc" and its members are protected by cgroup_mutex */
430 static struct move_charge_struct {
431 	spinlock_t	  lock; /* for from, to */
432 	struct mem_cgroup *from;
433 	struct mem_cgroup *to;
434 	unsigned long immigrate_flags;
435 	unsigned long precharge;
436 	unsigned long moved_charge;
437 	unsigned long moved_swap;
438 	struct task_struct *moving_task;	/* a task moving charges */
439 	wait_queue_head_t waitq;		/* a waitq for other context */
440 } mc = {
441 	.lock = __SPIN_LOCK_UNLOCKED(mc.lock),
442 	.waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
443 };
444 
445 static bool move_anon(void)
446 {
447 	return test_bit(MOVE_CHARGE_TYPE_ANON, &mc.immigrate_flags);
448 }
449 
450 static bool move_file(void)
451 {
452 	return test_bit(MOVE_CHARGE_TYPE_FILE, &mc.immigrate_flags);
453 }
454 
455 /*
456  * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
457  * limit reclaim to prevent infinite loops, if they ever occur.
458  */
459 #define	MEM_CGROUP_MAX_RECLAIM_LOOPS		100
460 #define	MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS	2
461 
462 enum charge_type {
463 	MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
464 	MEM_CGROUP_CHARGE_TYPE_ANON,
465 	MEM_CGROUP_CHARGE_TYPE_SWAPOUT,	/* for accounting swapcache */
466 	MEM_CGROUP_CHARGE_TYPE_DROP,	/* a page was unused swap cache */
467 	NR_CHARGE_TYPE,
468 };
469 
470 /* for encoding cft->private value on file */
471 enum res_type {
472 	_MEM,
473 	_MEMSWAP,
474 	_OOM_TYPE,
475 	_KMEM,
476 };
477 
478 #define MEMFILE_PRIVATE(x, val)	((x) << 16 | (val))
479 #define MEMFILE_TYPE(val)	((val) >> 16 & 0xffff)
480 #define MEMFILE_ATTR(val)	((val) & 0xffff)
481 /* Used for OOM nofiier */
482 #define OOM_CONTROL		(0)
483 
484 /*
485  * Reclaim flags for mem_cgroup_hierarchical_reclaim
486  */
487 #define MEM_CGROUP_RECLAIM_NOSWAP_BIT	0x0
488 #define MEM_CGROUP_RECLAIM_NOSWAP	(1 << MEM_CGROUP_RECLAIM_NOSWAP_BIT)
489 #define MEM_CGROUP_RECLAIM_SHRINK_BIT	0x1
490 #define MEM_CGROUP_RECLAIM_SHRINK	(1 << MEM_CGROUP_RECLAIM_SHRINK_BIT)
491 
492 /*
493  * The memcg_create_mutex will be held whenever a new cgroup is created.
494  * As a consequence, any change that needs to protect against new child cgroups
495  * appearing has to hold it as well.
496  */
497 static DEFINE_MUTEX(memcg_create_mutex);
498 
499 struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *s)
500 {
501 	return s ? container_of(s, struct mem_cgroup, css) : NULL;
502 }
503 
504 /* Some nice accessors for the vmpressure. */
505 struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
506 {
507 	if (!memcg)
508 		memcg = root_mem_cgroup;
509 	return &memcg->vmpressure;
510 }
511 
512 struct cgroup_subsys_state *vmpressure_to_css(struct vmpressure *vmpr)
513 {
514 	return &container_of(vmpr, struct mem_cgroup, vmpressure)->css;
515 }
516 
517 static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
518 {
519 	return (memcg == root_mem_cgroup);
520 }
521 
522 /*
523  * We restrict the id in the range of [1, 65535], so it can fit into
524  * an unsigned short.
525  */
526 #define MEM_CGROUP_ID_MAX	USHRT_MAX
527 
528 static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
529 {
530 	/*
531 	 * The ID of the root cgroup is 0, but memcg treat 0 as an
532 	 * invalid ID, so we return (cgroup_id + 1).
533 	 */
534 	return memcg->css.cgroup->id + 1;
535 }
536 
537 static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
538 {
539 	struct cgroup_subsys_state *css;
540 
541 	css = css_from_id(id - 1, &memory_cgrp_subsys);
542 	return mem_cgroup_from_css(css);
543 }
544 
545 /* Writing them here to avoid exposing memcg's inner layout */
546 #if defined(CONFIG_INET) && defined(CONFIG_MEMCG_KMEM)
547 
548 void sock_update_memcg(struct sock *sk)
549 {
550 	if (mem_cgroup_sockets_enabled) {
551 		struct mem_cgroup *memcg;
552 		struct cg_proto *cg_proto;
553 
554 		BUG_ON(!sk->sk_prot->proto_cgroup);
555 
556 		/* Socket cloning can throw us here with sk_cgrp already
557 		 * filled. It won't however, necessarily happen from
558 		 * process context. So the test for root memcg given
559 		 * the current task's memcg won't help us in this case.
560 		 *
561 		 * Respecting the original socket's memcg is a better
562 		 * decision in this case.
563 		 */
564 		if (sk->sk_cgrp) {
565 			BUG_ON(mem_cgroup_is_root(sk->sk_cgrp->memcg));
566 			css_get(&sk->sk_cgrp->memcg->css);
567 			return;
568 		}
569 
570 		rcu_read_lock();
571 		memcg = mem_cgroup_from_task(current);
572 		cg_proto = sk->sk_prot->proto_cgroup(memcg);
573 		if (!mem_cgroup_is_root(memcg) &&
574 		    memcg_proto_active(cg_proto) && css_tryget(&memcg->css)) {
575 			sk->sk_cgrp = cg_proto;
576 		}
577 		rcu_read_unlock();
578 	}
579 }
580 EXPORT_SYMBOL(sock_update_memcg);
581 
582 void sock_release_memcg(struct sock *sk)
583 {
584 	if (mem_cgroup_sockets_enabled && sk->sk_cgrp) {
585 		struct mem_cgroup *memcg;
586 		WARN_ON(!sk->sk_cgrp->memcg);
587 		memcg = sk->sk_cgrp->memcg;
588 		css_put(&sk->sk_cgrp->memcg->css);
589 	}
590 }
591 
592 struct cg_proto *tcp_proto_cgroup(struct mem_cgroup *memcg)
593 {
594 	if (!memcg || mem_cgroup_is_root(memcg))
595 		return NULL;
596 
597 	return &memcg->tcp_mem;
598 }
599 EXPORT_SYMBOL(tcp_proto_cgroup);
600 
601 static void disarm_sock_keys(struct mem_cgroup *memcg)
602 {
603 	if (!memcg_proto_activated(&memcg->tcp_mem))
604 		return;
605 	static_key_slow_dec(&memcg_socket_limit_enabled);
606 }
607 #else
608 static void disarm_sock_keys(struct mem_cgroup *memcg)
609 {
610 }
611 #endif
612 
613 #ifdef CONFIG_MEMCG_KMEM
614 /*
615  * This will be the memcg's index in each cache's ->memcg_params->memcg_caches.
616  * The main reason for not using cgroup id for this:
617  *  this works better in sparse environments, where we have a lot of memcgs,
618  *  but only a few kmem-limited. Or also, if we have, for instance, 200
619  *  memcgs, and none but the 200th is kmem-limited, we'd have to have a
620  *  200 entry array for that.
621  *
622  * The current size of the caches array is stored in
623  * memcg_limited_groups_array_size.  It will double each time we have to
624  * increase it.
625  */
626 static DEFINE_IDA(kmem_limited_groups);
627 int memcg_limited_groups_array_size;
628 
629 /*
630  * MIN_SIZE is different than 1, because we would like to avoid going through
631  * the alloc/free process all the time. In a small machine, 4 kmem-limited
632  * cgroups is a reasonable guess. In the future, it could be a parameter or
633  * tunable, but that is strictly not necessary.
634  *
635  * MAX_SIZE should be as large as the number of cgrp_ids. Ideally, we could get
636  * this constant directly from cgroup, but it is understandable that this is
637  * better kept as an internal representation in cgroup.c. In any case, the
638  * cgrp_id space is not getting any smaller, and we don't have to necessarily
639  * increase ours as well if it increases.
640  */
641 #define MEMCG_CACHES_MIN_SIZE 4
642 #define MEMCG_CACHES_MAX_SIZE MEM_CGROUP_ID_MAX
643 
644 /*
645  * A lot of the calls to the cache allocation functions are expected to be
646  * inlined by the compiler. Since the calls to memcg_kmem_get_cache are
647  * conditional to this static branch, we'll have to allow modules that does
648  * kmem_cache_alloc and the such to see this symbol as well
649  */
650 struct static_key memcg_kmem_enabled_key;
651 EXPORT_SYMBOL(memcg_kmem_enabled_key);
652 
653 static void disarm_kmem_keys(struct mem_cgroup *memcg)
654 {
655 	if (memcg_kmem_is_active(memcg)) {
656 		static_key_slow_dec(&memcg_kmem_enabled_key);
657 		ida_simple_remove(&kmem_limited_groups, memcg->kmemcg_id);
658 	}
659 	/*
660 	 * This check can't live in kmem destruction function,
661 	 * since the charges will outlive the cgroup
662 	 */
663 	WARN_ON(res_counter_read_u64(&memcg->kmem, RES_USAGE) != 0);
664 }
665 #else
666 static void disarm_kmem_keys(struct mem_cgroup *memcg)
667 {
668 }
669 #endif /* CONFIG_MEMCG_KMEM */
670 
671 static void disarm_static_keys(struct mem_cgroup *memcg)
672 {
673 	disarm_sock_keys(memcg);
674 	disarm_kmem_keys(memcg);
675 }
676 
677 static void drain_all_stock_async(struct mem_cgroup *memcg);
678 
679 static struct mem_cgroup_per_zone *
680 mem_cgroup_zoneinfo(struct mem_cgroup *memcg, int nid, int zid)
681 {
682 	VM_BUG_ON((unsigned)nid >= nr_node_ids);
683 	return &memcg->nodeinfo[nid]->zoneinfo[zid];
684 }
685 
686 struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *memcg)
687 {
688 	return &memcg->css;
689 }
690 
691 static struct mem_cgroup_per_zone *
692 page_cgroup_zoneinfo(struct mem_cgroup *memcg, struct page *page)
693 {
694 	int nid = page_to_nid(page);
695 	int zid = page_zonenum(page);
696 
697 	return mem_cgroup_zoneinfo(memcg, nid, zid);
698 }
699 
700 static struct mem_cgroup_tree_per_zone *
701 soft_limit_tree_node_zone(int nid, int zid)
702 {
703 	return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
704 }
705 
706 static struct mem_cgroup_tree_per_zone *
707 soft_limit_tree_from_page(struct page *page)
708 {
709 	int nid = page_to_nid(page);
710 	int zid = page_zonenum(page);
711 
712 	return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
713 }
714 
715 static void
716 __mem_cgroup_insert_exceeded(struct mem_cgroup *memcg,
717 				struct mem_cgroup_per_zone *mz,
718 				struct mem_cgroup_tree_per_zone *mctz,
719 				unsigned long long new_usage_in_excess)
720 {
721 	struct rb_node **p = &mctz->rb_root.rb_node;
722 	struct rb_node *parent = NULL;
723 	struct mem_cgroup_per_zone *mz_node;
724 
725 	if (mz->on_tree)
726 		return;
727 
728 	mz->usage_in_excess = new_usage_in_excess;
729 	if (!mz->usage_in_excess)
730 		return;
731 	while (*p) {
732 		parent = *p;
733 		mz_node = rb_entry(parent, struct mem_cgroup_per_zone,
734 					tree_node);
735 		if (mz->usage_in_excess < mz_node->usage_in_excess)
736 			p = &(*p)->rb_left;
737 		/*
738 		 * We can't avoid mem cgroups that are over their soft
739 		 * limit by the same amount
740 		 */
741 		else if (mz->usage_in_excess >= mz_node->usage_in_excess)
742 			p = &(*p)->rb_right;
743 	}
744 	rb_link_node(&mz->tree_node, parent, p);
745 	rb_insert_color(&mz->tree_node, &mctz->rb_root);
746 	mz->on_tree = true;
747 }
748 
749 static void
750 __mem_cgroup_remove_exceeded(struct mem_cgroup *memcg,
751 				struct mem_cgroup_per_zone *mz,
752 				struct mem_cgroup_tree_per_zone *mctz)
753 {
754 	if (!mz->on_tree)
755 		return;
756 	rb_erase(&mz->tree_node, &mctz->rb_root);
757 	mz->on_tree = false;
758 }
759 
760 static void
761 mem_cgroup_remove_exceeded(struct mem_cgroup *memcg,
762 				struct mem_cgroup_per_zone *mz,
763 				struct mem_cgroup_tree_per_zone *mctz)
764 {
765 	spin_lock(&mctz->lock);
766 	__mem_cgroup_remove_exceeded(memcg, mz, mctz);
767 	spin_unlock(&mctz->lock);
768 }
769 
770 
771 static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page)
772 {
773 	unsigned long long excess;
774 	struct mem_cgroup_per_zone *mz;
775 	struct mem_cgroup_tree_per_zone *mctz;
776 	int nid = page_to_nid(page);
777 	int zid = page_zonenum(page);
778 	mctz = soft_limit_tree_from_page(page);
779 
780 	/*
781 	 * Necessary to update all ancestors when hierarchy is used.
782 	 * because their event counter is not touched.
783 	 */
784 	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
785 		mz = mem_cgroup_zoneinfo(memcg, nid, zid);
786 		excess = res_counter_soft_limit_excess(&memcg->res);
787 		/*
788 		 * We have to update the tree if mz is on RB-tree or
789 		 * mem is over its softlimit.
790 		 */
791 		if (excess || mz->on_tree) {
792 			spin_lock(&mctz->lock);
793 			/* if on-tree, remove it */
794 			if (mz->on_tree)
795 				__mem_cgroup_remove_exceeded(memcg, mz, mctz);
796 			/*
797 			 * Insert again. mz->usage_in_excess will be updated.
798 			 * If excess is 0, no tree ops.
799 			 */
800 			__mem_cgroup_insert_exceeded(memcg, mz, mctz, excess);
801 			spin_unlock(&mctz->lock);
802 		}
803 	}
804 }
805 
806 static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
807 {
808 	int node, zone;
809 	struct mem_cgroup_per_zone *mz;
810 	struct mem_cgroup_tree_per_zone *mctz;
811 
812 	for_each_node(node) {
813 		for (zone = 0; zone < MAX_NR_ZONES; zone++) {
814 			mz = mem_cgroup_zoneinfo(memcg, node, zone);
815 			mctz = soft_limit_tree_node_zone(node, zone);
816 			mem_cgroup_remove_exceeded(memcg, mz, mctz);
817 		}
818 	}
819 }
820 
821 static struct mem_cgroup_per_zone *
822 __mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
823 {
824 	struct rb_node *rightmost = NULL;
825 	struct mem_cgroup_per_zone *mz;
826 
827 retry:
828 	mz = NULL;
829 	rightmost = rb_last(&mctz->rb_root);
830 	if (!rightmost)
831 		goto done;		/* Nothing to reclaim from */
832 
833 	mz = rb_entry(rightmost, struct mem_cgroup_per_zone, tree_node);
834 	/*
835 	 * Remove the node now but someone else can add it back,
836 	 * we will to add it back at the end of reclaim to its correct
837 	 * position in the tree.
838 	 */
839 	__mem_cgroup_remove_exceeded(mz->memcg, mz, mctz);
840 	if (!res_counter_soft_limit_excess(&mz->memcg->res) ||
841 		!css_tryget(&mz->memcg->css))
842 		goto retry;
843 done:
844 	return mz;
845 }
846 
847 static struct mem_cgroup_per_zone *
848 mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
849 {
850 	struct mem_cgroup_per_zone *mz;
851 
852 	spin_lock(&mctz->lock);
853 	mz = __mem_cgroup_largest_soft_limit_node(mctz);
854 	spin_unlock(&mctz->lock);
855 	return mz;
856 }
857 
858 /*
859  * Implementation Note: reading percpu statistics for memcg.
860  *
861  * Both of vmstat[] and percpu_counter has threshold and do periodic
862  * synchronization to implement "quick" read. There are trade-off between
863  * reading cost and precision of value. Then, we may have a chance to implement
864  * a periodic synchronizion of counter in memcg's counter.
865  *
866  * But this _read() function is used for user interface now. The user accounts
867  * memory usage by memory cgroup and he _always_ requires exact value because
868  * he accounts memory. Even if we provide quick-and-fuzzy read, we always
869  * have to visit all online cpus and make sum. So, for now, unnecessary
870  * synchronization is not implemented. (just implemented for cpu hotplug)
871  *
872  * If there are kernel internal actions which can make use of some not-exact
873  * value, and reading all cpu value can be performance bottleneck in some
874  * common workload, threashold and synchonization as vmstat[] should be
875  * implemented.
876  */
877 static long mem_cgroup_read_stat(struct mem_cgroup *memcg,
878 				 enum mem_cgroup_stat_index idx)
879 {
880 	long val = 0;
881 	int cpu;
882 
883 	get_online_cpus();
884 	for_each_online_cpu(cpu)
885 		val += per_cpu(memcg->stat->count[idx], cpu);
886 #ifdef CONFIG_HOTPLUG_CPU
887 	spin_lock(&memcg->pcp_counter_lock);
888 	val += memcg->nocpu_base.count[idx];
889 	spin_unlock(&memcg->pcp_counter_lock);
890 #endif
891 	put_online_cpus();
892 	return val;
893 }
894 
895 static void mem_cgroup_swap_statistics(struct mem_cgroup *memcg,
896 					 bool charge)
897 {
898 	int val = (charge) ? 1 : -1;
899 	this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_SWAP], val);
900 }
901 
902 static unsigned long mem_cgroup_read_events(struct mem_cgroup *memcg,
903 					    enum mem_cgroup_events_index idx)
904 {
905 	unsigned long val = 0;
906 	int cpu;
907 
908 	get_online_cpus();
909 	for_each_online_cpu(cpu)
910 		val += per_cpu(memcg->stat->events[idx], cpu);
911 #ifdef CONFIG_HOTPLUG_CPU
912 	spin_lock(&memcg->pcp_counter_lock);
913 	val += memcg->nocpu_base.events[idx];
914 	spin_unlock(&memcg->pcp_counter_lock);
915 #endif
916 	put_online_cpus();
917 	return val;
918 }
919 
920 static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
921 					 struct page *page,
922 					 bool anon, int nr_pages)
923 {
924 	/*
925 	 * Here, RSS means 'mapped anon' and anon's SwapCache. Shmem/tmpfs is
926 	 * counted as CACHE even if it's on ANON LRU.
927 	 */
928 	if (anon)
929 		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS],
930 				nr_pages);
931 	else
932 		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_CACHE],
933 				nr_pages);
934 
935 	if (PageTransHuge(page))
936 		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE],
937 				nr_pages);
938 
939 	/* pagein of a big page is an event. So, ignore page size */
940 	if (nr_pages > 0)
941 		__this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGIN]);
942 	else {
943 		__this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGOUT]);
944 		nr_pages = -nr_pages; /* for event */
945 	}
946 
947 	__this_cpu_add(memcg->stat->nr_page_events, nr_pages);
948 }
949 
950 unsigned long
951 mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
952 {
953 	struct mem_cgroup_per_zone *mz;
954 
955 	mz = container_of(lruvec, struct mem_cgroup_per_zone, lruvec);
956 	return mz->lru_size[lru];
957 }
958 
959 static unsigned long
960 mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg, int nid, int zid,
961 			unsigned int lru_mask)
962 {
963 	struct mem_cgroup_per_zone *mz;
964 	enum lru_list lru;
965 	unsigned long ret = 0;
966 
967 	mz = mem_cgroup_zoneinfo(memcg, nid, zid);
968 
969 	for_each_lru(lru) {
970 		if (BIT(lru) & lru_mask)
971 			ret += mz->lru_size[lru];
972 	}
973 	return ret;
974 }
975 
976 static unsigned long
977 mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
978 			int nid, unsigned int lru_mask)
979 {
980 	u64 total = 0;
981 	int zid;
982 
983 	for (zid = 0; zid < MAX_NR_ZONES; zid++)
984 		total += mem_cgroup_zone_nr_lru_pages(memcg,
985 						nid, zid, lru_mask);
986 
987 	return total;
988 }
989 
990 static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg,
991 			unsigned int lru_mask)
992 {
993 	int nid;
994 	u64 total = 0;
995 
996 	for_each_node_state(nid, N_MEMORY)
997 		total += mem_cgroup_node_nr_lru_pages(memcg, nid, lru_mask);
998 	return total;
999 }
1000 
1001 static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
1002 				       enum mem_cgroup_events_target target)
1003 {
1004 	unsigned long val, next;
1005 
1006 	val = __this_cpu_read(memcg->stat->nr_page_events);
1007 	next = __this_cpu_read(memcg->stat->targets[target]);
1008 	/* from time_after() in jiffies.h */
1009 	if ((long)next - (long)val < 0) {
1010 		switch (target) {
1011 		case MEM_CGROUP_TARGET_THRESH:
1012 			next = val + THRESHOLDS_EVENTS_TARGET;
1013 			break;
1014 		case MEM_CGROUP_TARGET_SOFTLIMIT:
1015 			next = val + SOFTLIMIT_EVENTS_TARGET;
1016 			break;
1017 		case MEM_CGROUP_TARGET_NUMAINFO:
1018 			next = val + NUMAINFO_EVENTS_TARGET;
1019 			break;
1020 		default:
1021 			break;
1022 		}
1023 		__this_cpu_write(memcg->stat->targets[target], next);
1024 		return true;
1025 	}
1026 	return false;
1027 }
1028 
1029 /*
1030  * Check events in order.
1031  *
1032  */
1033 static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
1034 {
1035 	preempt_disable();
1036 	/* threshold event is triggered in finer grain than soft limit */
1037 	if (unlikely(mem_cgroup_event_ratelimit(memcg,
1038 						MEM_CGROUP_TARGET_THRESH))) {
1039 		bool do_softlimit;
1040 		bool do_numainfo __maybe_unused;
1041 
1042 		do_softlimit = mem_cgroup_event_ratelimit(memcg,
1043 						MEM_CGROUP_TARGET_SOFTLIMIT);
1044 #if MAX_NUMNODES > 1
1045 		do_numainfo = mem_cgroup_event_ratelimit(memcg,
1046 						MEM_CGROUP_TARGET_NUMAINFO);
1047 #endif
1048 		preempt_enable();
1049 
1050 		mem_cgroup_threshold(memcg);
1051 		if (unlikely(do_softlimit))
1052 			mem_cgroup_update_tree(memcg, page);
1053 #if MAX_NUMNODES > 1
1054 		if (unlikely(do_numainfo))
1055 			atomic_inc(&memcg->numainfo_events);
1056 #endif
1057 	} else
1058 		preempt_enable();
1059 }
1060 
1061 struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
1062 {
1063 	/*
1064 	 * mm_update_next_owner() may clear mm->owner to NULL
1065 	 * if it races with swapoff, page migration, etc.
1066 	 * So this can be called with p == NULL.
1067 	 */
1068 	if (unlikely(!p))
1069 		return NULL;
1070 
1071 	return mem_cgroup_from_css(task_css(p, memory_cgrp_id));
1072 }
1073 
1074 static struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
1075 {
1076 	struct mem_cgroup *memcg = NULL;
1077 
1078 	rcu_read_lock();
1079 	do {
1080 		/*
1081 		 * Page cache insertions can happen withou an
1082 		 * actual mm context, e.g. during disk probing
1083 		 * on boot, loopback IO, acct() writes etc.
1084 		 */
1085 		if (unlikely(!mm))
1086 			memcg = root_mem_cgroup;
1087 		else {
1088 			memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
1089 			if (unlikely(!memcg))
1090 				memcg = root_mem_cgroup;
1091 		}
1092 	} while (!css_tryget(&memcg->css));
1093 	rcu_read_unlock();
1094 	return memcg;
1095 }
1096 
1097 /*
1098  * Returns a next (in a pre-order walk) alive memcg (with elevated css
1099  * ref. count) or NULL if the whole root's subtree has been visited.
1100  *
1101  * helper function to be used by mem_cgroup_iter
1102  */
1103 static struct mem_cgroup *__mem_cgroup_iter_next(struct mem_cgroup *root,
1104 		struct mem_cgroup *last_visited)
1105 {
1106 	struct cgroup_subsys_state *prev_css, *next_css;
1107 
1108 	prev_css = last_visited ? &last_visited->css : NULL;
1109 skip_node:
1110 	next_css = css_next_descendant_pre(prev_css, &root->css);
1111 
1112 	/*
1113 	 * Even if we found a group we have to make sure it is
1114 	 * alive. css && !memcg means that the groups should be
1115 	 * skipped and we should continue the tree walk.
1116 	 * last_visited css is safe to use because it is
1117 	 * protected by css_get and the tree walk is rcu safe.
1118 	 *
1119 	 * We do not take a reference on the root of the tree walk
1120 	 * because we might race with the root removal when it would
1121 	 * be the only node in the iterated hierarchy and mem_cgroup_iter
1122 	 * would end up in an endless loop because it expects that at
1123 	 * least one valid node will be returned. Root cannot disappear
1124 	 * because caller of the iterator should hold it already so
1125 	 * skipping css reference should be safe.
1126 	 */
1127 	if (next_css) {
1128 		if ((next_css == &root->css) ||
1129 		    ((next_css->flags & CSS_ONLINE) && css_tryget(next_css)))
1130 			return mem_cgroup_from_css(next_css);
1131 
1132 		prev_css = next_css;
1133 		goto skip_node;
1134 	}
1135 
1136 	return NULL;
1137 }
1138 
1139 static void mem_cgroup_iter_invalidate(struct mem_cgroup *root)
1140 {
1141 	/*
1142 	 * When a group in the hierarchy below root is destroyed, the
1143 	 * hierarchy iterator can no longer be trusted since it might
1144 	 * have pointed to the destroyed group.  Invalidate it.
1145 	 */
1146 	atomic_inc(&root->dead_count);
1147 }
1148 
1149 static struct mem_cgroup *
1150 mem_cgroup_iter_load(struct mem_cgroup_reclaim_iter *iter,
1151 		     struct mem_cgroup *root,
1152 		     int *sequence)
1153 {
1154 	struct mem_cgroup *position = NULL;
1155 	/*
1156 	 * A cgroup destruction happens in two stages: offlining and
1157 	 * release.  They are separated by a RCU grace period.
1158 	 *
1159 	 * If the iterator is valid, we may still race with an
1160 	 * offlining.  The RCU lock ensures the object won't be
1161 	 * released, tryget will fail if we lost the race.
1162 	 */
1163 	*sequence = atomic_read(&root->dead_count);
1164 	if (iter->last_dead_count == *sequence) {
1165 		smp_rmb();
1166 		position = iter->last_visited;
1167 
1168 		/*
1169 		 * We cannot take a reference to root because we might race
1170 		 * with root removal and returning NULL would end up in
1171 		 * an endless loop on the iterator user level when root
1172 		 * would be returned all the time.
1173 		 */
1174 		if (position && position != root &&
1175 				!css_tryget(&position->css))
1176 			position = NULL;
1177 	}
1178 	return position;
1179 }
1180 
1181 static void mem_cgroup_iter_update(struct mem_cgroup_reclaim_iter *iter,
1182 				   struct mem_cgroup *last_visited,
1183 				   struct mem_cgroup *new_position,
1184 				   struct mem_cgroup *root,
1185 				   int sequence)
1186 {
1187 	/* root reference counting symmetric to mem_cgroup_iter_load */
1188 	if (last_visited && last_visited != root)
1189 		css_put(&last_visited->css);
1190 	/*
1191 	 * We store the sequence count from the time @last_visited was
1192 	 * loaded successfully instead of rereading it here so that we
1193 	 * don't lose destruction events in between.  We could have
1194 	 * raced with the destruction of @new_position after all.
1195 	 */
1196 	iter->last_visited = new_position;
1197 	smp_wmb();
1198 	iter->last_dead_count = sequence;
1199 }
1200 
1201 /**
1202  * mem_cgroup_iter - iterate over memory cgroup hierarchy
1203  * @root: hierarchy root
1204  * @prev: previously returned memcg, NULL on first invocation
1205  * @reclaim: cookie for shared reclaim walks, NULL for full walks
1206  *
1207  * Returns references to children of the hierarchy below @root, or
1208  * @root itself, or %NULL after a full round-trip.
1209  *
1210  * Caller must pass the return value in @prev on subsequent
1211  * invocations for reference counting, or use mem_cgroup_iter_break()
1212  * to cancel a hierarchy walk before the round-trip is complete.
1213  *
1214  * Reclaimers can specify a zone and a priority level in @reclaim to
1215  * divide up the memcgs in the hierarchy among all concurrent
1216  * reclaimers operating on the same zone and priority.
1217  */
1218 struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
1219 				   struct mem_cgroup *prev,
1220 				   struct mem_cgroup_reclaim_cookie *reclaim)
1221 {
1222 	struct mem_cgroup *memcg = NULL;
1223 	struct mem_cgroup *last_visited = NULL;
1224 
1225 	if (mem_cgroup_disabled())
1226 		return NULL;
1227 
1228 	if (!root)
1229 		root = root_mem_cgroup;
1230 
1231 	if (prev && !reclaim)
1232 		last_visited = prev;
1233 
1234 	if (!root->use_hierarchy && root != root_mem_cgroup) {
1235 		if (prev)
1236 			goto out_css_put;
1237 		return root;
1238 	}
1239 
1240 	rcu_read_lock();
1241 	while (!memcg) {
1242 		struct mem_cgroup_reclaim_iter *uninitialized_var(iter);
1243 		int uninitialized_var(seq);
1244 
1245 		if (reclaim) {
1246 			int nid = zone_to_nid(reclaim->zone);
1247 			int zid = zone_idx(reclaim->zone);
1248 			struct mem_cgroup_per_zone *mz;
1249 
1250 			mz = mem_cgroup_zoneinfo(root, nid, zid);
1251 			iter = &mz->reclaim_iter[reclaim->priority];
1252 			if (prev && reclaim->generation != iter->generation) {
1253 				iter->last_visited = NULL;
1254 				goto out_unlock;
1255 			}
1256 
1257 			last_visited = mem_cgroup_iter_load(iter, root, &seq);
1258 		}
1259 
1260 		memcg = __mem_cgroup_iter_next(root, last_visited);
1261 
1262 		if (reclaim) {
1263 			mem_cgroup_iter_update(iter, last_visited, memcg, root,
1264 					seq);
1265 
1266 			if (!memcg)
1267 				iter->generation++;
1268 			else if (!prev && memcg)
1269 				reclaim->generation = iter->generation;
1270 		}
1271 
1272 		if (prev && !memcg)
1273 			goto out_unlock;
1274 	}
1275 out_unlock:
1276 	rcu_read_unlock();
1277 out_css_put:
1278 	if (prev && prev != root)
1279 		css_put(&prev->css);
1280 
1281 	return memcg;
1282 }
1283 
1284 /**
1285  * mem_cgroup_iter_break - abort a hierarchy walk prematurely
1286  * @root: hierarchy root
1287  * @prev: last visited hierarchy member as returned by mem_cgroup_iter()
1288  */
1289 void mem_cgroup_iter_break(struct mem_cgroup *root,
1290 			   struct mem_cgroup *prev)
1291 {
1292 	if (!root)
1293 		root = root_mem_cgroup;
1294 	if (prev && prev != root)
1295 		css_put(&prev->css);
1296 }
1297 
1298 /*
1299  * Iteration constructs for visiting all cgroups (under a tree).  If
1300  * loops are exited prematurely (break), mem_cgroup_iter_break() must
1301  * be used for reference counting.
1302  */
1303 #define for_each_mem_cgroup_tree(iter, root)		\
1304 	for (iter = mem_cgroup_iter(root, NULL, NULL);	\
1305 	     iter != NULL;				\
1306 	     iter = mem_cgroup_iter(root, iter, NULL))
1307 
1308 #define for_each_mem_cgroup(iter)			\
1309 	for (iter = mem_cgroup_iter(NULL, NULL, NULL);	\
1310 	     iter != NULL;				\
1311 	     iter = mem_cgroup_iter(NULL, iter, NULL))
1312 
1313 void __mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
1314 {
1315 	struct mem_cgroup *memcg;
1316 
1317 	rcu_read_lock();
1318 	memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
1319 	if (unlikely(!memcg))
1320 		goto out;
1321 
1322 	switch (idx) {
1323 	case PGFAULT:
1324 		this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGFAULT]);
1325 		break;
1326 	case PGMAJFAULT:
1327 		this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGMAJFAULT]);
1328 		break;
1329 	default:
1330 		BUG();
1331 	}
1332 out:
1333 	rcu_read_unlock();
1334 }
1335 EXPORT_SYMBOL(__mem_cgroup_count_vm_event);
1336 
1337 /**
1338  * mem_cgroup_zone_lruvec - get the lru list vector for a zone and memcg
1339  * @zone: zone of the wanted lruvec
1340  * @memcg: memcg of the wanted lruvec
1341  *
1342  * Returns the lru list vector holding pages for the given @zone and
1343  * @mem.  This can be the global zone lruvec, if the memory controller
1344  * is disabled.
1345  */
1346 struct lruvec *mem_cgroup_zone_lruvec(struct zone *zone,
1347 				      struct mem_cgroup *memcg)
1348 {
1349 	struct mem_cgroup_per_zone *mz;
1350 	struct lruvec *lruvec;
1351 
1352 	if (mem_cgroup_disabled()) {
1353 		lruvec = &zone->lruvec;
1354 		goto out;
1355 	}
1356 
1357 	mz = mem_cgroup_zoneinfo(memcg, zone_to_nid(zone), zone_idx(zone));
1358 	lruvec = &mz->lruvec;
1359 out:
1360 	/*
1361 	 * Since a node can be onlined after the mem_cgroup was created,
1362 	 * we have to be prepared to initialize lruvec->zone here;
1363 	 * and if offlined then reonlined, we need to reinitialize it.
1364 	 */
1365 	if (unlikely(lruvec->zone != zone))
1366 		lruvec->zone = zone;
1367 	return lruvec;
1368 }
1369 
1370 /*
1371  * Following LRU functions are allowed to be used without PCG_LOCK.
1372  * Operations are called by routine of global LRU independently from memcg.
1373  * What we have to take care of here is validness of pc->mem_cgroup.
1374  *
1375  * Changes to pc->mem_cgroup happens when
1376  * 1. charge
1377  * 2. moving account
1378  * In typical case, "charge" is done before add-to-lru. Exception is SwapCache.
1379  * It is added to LRU before charge.
1380  * If PCG_USED bit is not set, page_cgroup is not added to this private LRU.
1381  * When moving account, the page is not on LRU. It's isolated.
1382  */
1383 
1384 /**
1385  * mem_cgroup_page_lruvec - return lruvec for adding an lru page
1386  * @page: the page
1387  * @zone: zone of the page
1388  */
1389 struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct zone *zone)
1390 {
1391 	struct mem_cgroup_per_zone *mz;
1392 	struct mem_cgroup *memcg;
1393 	struct page_cgroup *pc;
1394 	struct lruvec *lruvec;
1395 
1396 	if (mem_cgroup_disabled()) {
1397 		lruvec = &zone->lruvec;
1398 		goto out;
1399 	}
1400 
1401 	pc = lookup_page_cgroup(page);
1402 	memcg = pc->mem_cgroup;
1403 
1404 	/*
1405 	 * Surreptitiously switch any uncharged offlist page to root:
1406 	 * an uncharged page off lru does nothing to secure
1407 	 * its former mem_cgroup from sudden removal.
1408 	 *
1409 	 * Our caller holds lru_lock, and PageCgroupUsed is updated
1410 	 * under page_cgroup lock: between them, they make all uses
1411 	 * of pc->mem_cgroup safe.
1412 	 */
1413 	if (!PageLRU(page) && !PageCgroupUsed(pc) && memcg != root_mem_cgroup)
1414 		pc->mem_cgroup = memcg = root_mem_cgroup;
1415 
1416 	mz = page_cgroup_zoneinfo(memcg, page);
1417 	lruvec = &mz->lruvec;
1418 out:
1419 	/*
1420 	 * Since a node can be onlined after the mem_cgroup was created,
1421 	 * we have to be prepared to initialize lruvec->zone here;
1422 	 * and if offlined then reonlined, we need to reinitialize it.
1423 	 */
1424 	if (unlikely(lruvec->zone != zone))
1425 		lruvec->zone = zone;
1426 	return lruvec;
1427 }
1428 
1429 /**
1430  * mem_cgroup_update_lru_size - account for adding or removing an lru page
1431  * @lruvec: mem_cgroup per zone lru vector
1432  * @lru: index of lru list the page is sitting on
1433  * @nr_pages: positive when adding or negative when removing
1434  *
1435  * This function must be called when a page is added to or removed from an
1436  * lru list.
1437  */
1438 void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
1439 				int nr_pages)
1440 {
1441 	struct mem_cgroup_per_zone *mz;
1442 	unsigned long *lru_size;
1443 
1444 	if (mem_cgroup_disabled())
1445 		return;
1446 
1447 	mz = container_of(lruvec, struct mem_cgroup_per_zone, lruvec);
1448 	lru_size = mz->lru_size + lru;
1449 	*lru_size += nr_pages;
1450 	VM_BUG_ON((long)(*lru_size) < 0);
1451 }
1452 
1453 /*
1454  * Checks whether given mem is same or in the root_mem_cgroup's
1455  * hierarchy subtree
1456  */
1457 bool __mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg,
1458 				  struct mem_cgroup *memcg)
1459 {
1460 	if (root_memcg == memcg)
1461 		return true;
1462 	if (!root_memcg->use_hierarchy || !memcg)
1463 		return false;
1464 	return cgroup_is_descendant(memcg->css.cgroup, root_memcg->css.cgroup);
1465 }
1466 
1467 static bool mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg,
1468 				       struct mem_cgroup *memcg)
1469 {
1470 	bool ret;
1471 
1472 	rcu_read_lock();
1473 	ret = __mem_cgroup_same_or_subtree(root_memcg, memcg);
1474 	rcu_read_unlock();
1475 	return ret;
1476 }
1477 
1478 bool task_in_mem_cgroup(struct task_struct *task,
1479 			const struct mem_cgroup *memcg)
1480 {
1481 	struct mem_cgroup *curr = NULL;
1482 	struct task_struct *p;
1483 	bool ret;
1484 
1485 	p = find_lock_task_mm(task);
1486 	if (p) {
1487 		curr = get_mem_cgroup_from_mm(p->mm);
1488 		task_unlock(p);
1489 	} else {
1490 		/*
1491 		 * All threads may have already detached their mm's, but the oom
1492 		 * killer still needs to detect if they have already been oom
1493 		 * killed to prevent needlessly killing additional tasks.
1494 		 */
1495 		rcu_read_lock();
1496 		curr = mem_cgroup_from_task(task);
1497 		if (curr)
1498 			css_get(&curr->css);
1499 		rcu_read_unlock();
1500 	}
1501 	/*
1502 	 * We should check use_hierarchy of "memcg" not "curr". Because checking
1503 	 * use_hierarchy of "curr" here make this function true if hierarchy is
1504 	 * enabled in "curr" and "curr" is a child of "memcg" in *cgroup*
1505 	 * hierarchy(even if use_hierarchy is disabled in "memcg").
1506 	 */
1507 	ret = mem_cgroup_same_or_subtree(memcg, curr);
1508 	css_put(&curr->css);
1509 	return ret;
1510 }
1511 
1512 int mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec)
1513 {
1514 	unsigned long inactive_ratio;
1515 	unsigned long inactive;
1516 	unsigned long active;
1517 	unsigned long gb;
1518 
1519 	inactive = mem_cgroup_get_lru_size(lruvec, LRU_INACTIVE_ANON);
1520 	active = mem_cgroup_get_lru_size(lruvec, LRU_ACTIVE_ANON);
1521 
1522 	gb = (inactive + active) >> (30 - PAGE_SHIFT);
1523 	if (gb)
1524 		inactive_ratio = int_sqrt(10 * gb);
1525 	else
1526 		inactive_ratio = 1;
1527 
1528 	return inactive * inactive_ratio < active;
1529 }
1530 
1531 #define mem_cgroup_from_res_counter(counter, member)	\
1532 	container_of(counter, struct mem_cgroup, member)
1533 
1534 /**
1535  * mem_cgroup_margin - calculate chargeable space of a memory cgroup
1536  * @memcg: the memory cgroup
1537  *
1538  * Returns the maximum amount of memory @mem can be charged with, in
1539  * pages.
1540  */
1541 static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
1542 {
1543 	unsigned long long margin;
1544 
1545 	margin = res_counter_margin(&memcg->res);
1546 	if (do_swap_account)
1547 		margin = min(margin, res_counter_margin(&memcg->memsw));
1548 	return margin >> PAGE_SHIFT;
1549 }
1550 
1551 int mem_cgroup_swappiness(struct mem_cgroup *memcg)
1552 {
1553 	/* root ? */
1554 	if (!css_parent(&memcg->css))
1555 		return vm_swappiness;
1556 
1557 	return memcg->swappiness;
1558 }
1559 
1560 /*
1561  * memcg->moving_account is used for checking possibility that some thread is
1562  * calling move_account(). When a thread on CPU-A starts moving pages under
1563  * a memcg, other threads should check memcg->moving_account under
1564  * rcu_read_lock(), like this:
1565  *
1566  *         CPU-A                                    CPU-B
1567  *                                              rcu_read_lock()
1568  *         memcg->moving_account+1              if (memcg->mocing_account)
1569  *                                                   take heavy locks.
1570  *         synchronize_rcu()                    update something.
1571  *                                              rcu_read_unlock()
1572  *         start move here.
1573  */
1574 
1575 /* for quick checking without looking up memcg */
1576 atomic_t memcg_moving __read_mostly;
1577 
1578 static void mem_cgroup_start_move(struct mem_cgroup *memcg)
1579 {
1580 	atomic_inc(&memcg_moving);
1581 	atomic_inc(&memcg->moving_account);
1582 	synchronize_rcu();
1583 }
1584 
1585 static void mem_cgroup_end_move(struct mem_cgroup *memcg)
1586 {
1587 	/*
1588 	 * Now, mem_cgroup_clear_mc() may call this function with NULL.
1589 	 * We check NULL in callee rather than caller.
1590 	 */
1591 	if (memcg) {
1592 		atomic_dec(&memcg_moving);
1593 		atomic_dec(&memcg->moving_account);
1594 	}
1595 }
1596 
1597 /*
1598  * 2 routines for checking "mem" is under move_account() or not.
1599  *
1600  * mem_cgroup_stolen() -  checking whether a cgroup is mc.from or not. This
1601  *			  is used for avoiding races in accounting.  If true,
1602  *			  pc->mem_cgroup may be overwritten.
1603  *
1604  * mem_cgroup_under_move() - checking a cgroup is mc.from or mc.to or
1605  *			  under hierarchy of moving cgroups. This is for
1606  *			  waiting at hith-memory prressure caused by "move".
1607  */
1608 
1609 static bool mem_cgroup_stolen(struct mem_cgroup *memcg)
1610 {
1611 	VM_BUG_ON(!rcu_read_lock_held());
1612 	return atomic_read(&memcg->moving_account) > 0;
1613 }
1614 
1615 static bool mem_cgroup_under_move(struct mem_cgroup *memcg)
1616 {
1617 	struct mem_cgroup *from;
1618 	struct mem_cgroup *to;
1619 	bool ret = false;
1620 	/*
1621 	 * Unlike task_move routines, we access mc.to, mc.from not under
1622 	 * mutual exclusion by cgroup_mutex. Here, we take spinlock instead.
1623 	 */
1624 	spin_lock(&mc.lock);
1625 	from = mc.from;
1626 	to = mc.to;
1627 	if (!from)
1628 		goto unlock;
1629 
1630 	ret = mem_cgroup_same_or_subtree(memcg, from)
1631 		|| mem_cgroup_same_or_subtree(memcg, to);
1632 unlock:
1633 	spin_unlock(&mc.lock);
1634 	return ret;
1635 }
1636 
1637 static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg)
1638 {
1639 	if (mc.moving_task && current != mc.moving_task) {
1640 		if (mem_cgroup_under_move(memcg)) {
1641 			DEFINE_WAIT(wait);
1642 			prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE);
1643 			/* moving charge context might have finished. */
1644 			if (mc.moving_task)
1645 				schedule();
1646 			finish_wait(&mc.waitq, &wait);
1647 			return true;
1648 		}
1649 	}
1650 	return false;
1651 }
1652 
1653 /*
1654  * Take this lock when
1655  * - a code tries to modify page's memcg while it's USED.
1656  * - a code tries to modify page state accounting in a memcg.
1657  * see mem_cgroup_stolen(), too.
1658  */
1659 static void move_lock_mem_cgroup(struct mem_cgroup *memcg,
1660 				  unsigned long *flags)
1661 {
1662 	spin_lock_irqsave(&memcg->move_lock, *flags);
1663 }
1664 
1665 static void move_unlock_mem_cgroup(struct mem_cgroup *memcg,
1666 				unsigned long *flags)
1667 {
1668 	spin_unlock_irqrestore(&memcg->move_lock, *flags);
1669 }
1670 
1671 #define K(x) ((x) << (PAGE_SHIFT-10))
1672 /**
1673  * mem_cgroup_print_oom_info: Print OOM information relevant to memory controller.
1674  * @memcg: The memory cgroup that went over limit
1675  * @p: Task that is going to be killed
1676  *
1677  * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
1678  * enabled
1679  */
1680 void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
1681 {
1682 	/* oom_info_lock ensures that parallel ooms do not interleave */
1683 	static DEFINE_MUTEX(oom_info_lock);
1684 	struct mem_cgroup *iter;
1685 	unsigned int i;
1686 
1687 	if (!p)
1688 		return;
1689 
1690 	mutex_lock(&oom_info_lock);
1691 	rcu_read_lock();
1692 
1693 	pr_info("Task in ");
1694 	pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
1695 	pr_info(" killed as a result of limit of ");
1696 	pr_cont_cgroup_path(memcg->css.cgroup);
1697 	pr_info("\n");
1698 
1699 	rcu_read_unlock();
1700 
1701 	pr_info("memory: usage %llukB, limit %llukB, failcnt %llu\n",
1702 		res_counter_read_u64(&memcg->res, RES_USAGE) >> 10,
1703 		res_counter_read_u64(&memcg->res, RES_LIMIT) >> 10,
1704 		res_counter_read_u64(&memcg->res, RES_FAILCNT));
1705 	pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %llu\n",
1706 		res_counter_read_u64(&memcg->memsw, RES_USAGE) >> 10,
1707 		res_counter_read_u64(&memcg->memsw, RES_LIMIT) >> 10,
1708 		res_counter_read_u64(&memcg->memsw, RES_FAILCNT));
1709 	pr_info("kmem: usage %llukB, limit %llukB, failcnt %llu\n",
1710 		res_counter_read_u64(&memcg->kmem, RES_USAGE) >> 10,
1711 		res_counter_read_u64(&memcg->kmem, RES_LIMIT) >> 10,
1712 		res_counter_read_u64(&memcg->kmem, RES_FAILCNT));
1713 
1714 	for_each_mem_cgroup_tree(iter, memcg) {
1715 		pr_info("Memory cgroup stats for ");
1716 		pr_cont_cgroup_path(iter->css.cgroup);
1717 		pr_cont(":");
1718 
1719 		for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
1720 			if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account)
1721 				continue;
1722 			pr_cont(" %s:%ldKB", mem_cgroup_stat_names[i],
1723 				K(mem_cgroup_read_stat(iter, i)));
1724 		}
1725 
1726 		for (i = 0; i < NR_LRU_LISTS; i++)
1727 			pr_cont(" %s:%luKB", mem_cgroup_lru_names[i],
1728 				K(mem_cgroup_nr_lru_pages(iter, BIT(i))));
1729 
1730 		pr_cont("\n");
1731 	}
1732 	mutex_unlock(&oom_info_lock);
1733 }
1734 
1735 /*
1736  * This function returns the number of memcg under hierarchy tree. Returns
1737  * 1(self count) if no children.
1738  */
1739 static int mem_cgroup_count_children(struct mem_cgroup *memcg)
1740 {
1741 	int num = 0;
1742 	struct mem_cgroup *iter;
1743 
1744 	for_each_mem_cgroup_tree(iter, memcg)
1745 		num++;
1746 	return num;
1747 }
1748 
1749 /*
1750  * Return the memory (and swap, if configured) limit for a memcg.
1751  */
1752 static u64 mem_cgroup_get_limit(struct mem_cgroup *memcg)
1753 {
1754 	u64 limit;
1755 
1756 	limit = res_counter_read_u64(&memcg->res, RES_LIMIT);
1757 
1758 	/*
1759 	 * Do not consider swap space if we cannot swap due to swappiness
1760 	 */
1761 	if (mem_cgroup_swappiness(memcg)) {
1762 		u64 memsw;
1763 
1764 		limit += total_swap_pages << PAGE_SHIFT;
1765 		memsw = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
1766 
1767 		/*
1768 		 * If memsw is finite and limits the amount of swap space
1769 		 * available to this memcg, return that limit.
1770 		 */
1771 		limit = min(limit, memsw);
1772 	}
1773 
1774 	return limit;
1775 }
1776 
1777 static void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
1778 				     int order)
1779 {
1780 	struct mem_cgroup *iter;
1781 	unsigned long chosen_points = 0;
1782 	unsigned long totalpages;
1783 	unsigned int points = 0;
1784 	struct task_struct *chosen = NULL;
1785 
1786 	/*
1787 	 * If current has a pending SIGKILL or is exiting, then automatically
1788 	 * select it.  The goal is to allow it to allocate so that it may
1789 	 * quickly exit and free its memory.
1790 	 */
1791 	if (fatal_signal_pending(current) || current->flags & PF_EXITING) {
1792 		set_thread_flag(TIF_MEMDIE);
1793 		return;
1794 	}
1795 
1796 	check_panic_on_oom(CONSTRAINT_MEMCG, gfp_mask, order, NULL);
1797 	totalpages = mem_cgroup_get_limit(memcg) >> PAGE_SHIFT ? : 1;
1798 	for_each_mem_cgroup_tree(iter, memcg) {
1799 		struct css_task_iter it;
1800 		struct task_struct *task;
1801 
1802 		css_task_iter_start(&iter->css, &it);
1803 		while ((task = css_task_iter_next(&it))) {
1804 			switch (oom_scan_process_thread(task, totalpages, NULL,
1805 							false)) {
1806 			case OOM_SCAN_SELECT:
1807 				if (chosen)
1808 					put_task_struct(chosen);
1809 				chosen = task;
1810 				chosen_points = ULONG_MAX;
1811 				get_task_struct(chosen);
1812 				/* fall through */
1813 			case OOM_SCAN_CONTINUE:
1814 				continue;
1815 			case OOM_SCAN_ABORT:
1816 				css_task_iter_end(&it);
1817 				mem_cgroup_iter_break(memcg, iter);
1818 				if (chosen)
1819 					put_task_struct(chosen);
1820 				return;
1821 			case OOM_SCAN_OK:
1822 				break;
1823 			};
1824 			points = oom_badness(task, memcg, NULL, totalpages);
1825 			if (!points || points < chosen_points)
1826 				continue;
1827 			/* Prefer thread group leaders for display purposes */
1828 			if (points == chosen_points &&
1829 			    thread_group_leader(chosen))
1830 				continue;
1831 
1832 			if (chosen)
1833 				put_task_struct(chosen);
1834 			chosen = task;
1835 			chosen_points = points;
1836 			get_task_struct(chosen);
1837 		}
1838 		css_task_iter_end(&it);
1839 	}
1840 
1841 	if (!chosen)
1842 		return;
1843 	points = chosen_points * 1000 / totalpages;
1844 	oom_kill_process(chosen, gfp_mask, order, points, totalpages, memcg,
1845 			 NULL, "Memory cgroup out of memory");
1846 }
1847 
1848 static unsigned long mem_cgroup_reclaim(struct mem_cgroup *memcg,
1849 					gfp_t gfp_mask,
1850 					unsigned long flags)
1851 {
1852 	unsigned long total = 0;
1853 	bool noswap = false;
1854 	int loop;
1855 
1856 	if (flags & MEM_CGROUP_RECLAIM_NOSWAP)
1857 		noswap = true;
1858 	if (!(flags & MEM_CGROUP_RECLAIM_SHRINK) && memcg->memsw_is_minimum)
1859 		noswap = true;
1860 
1861 	for (loop = 0; loop < MEM_CGROUP_MAX_RECLAIM_LOOPS; loop++) {
1862 		if (loop)
1863 			drain_all_stock_async(memcg);
1864 		total += try_to_free_mem_cgroup_pages(memcg, gfp_mask, noswap);
1865 		/*
1866 		 * Allow limit shrinkers, which are triggered directly
1867 		 * by userspace, to catch signals and stop reclaim
1868 		 * after minimal progress, regardless of the margin.
1869 		 */
1870 		if (total && (flags & MEM_CGROUP_RECLAIM_SHRINK))
1871 			break;
1872 		if (mem_cgroup_margin(memcg))
1873 			break;
1874 		/*
1875 		 * If nothing was reclaimed after two attempts, there
1876 		 * may be no reclaimable pages in this hierarchy.
1877 		 */
1878 		if (loop && !total)
1879 			break;
1880 	}
1881 	return total;
1882 }
1883 
1884 /**
1885  * test_mem_cgroup_node_reclaimable
1886  * @memcg: the target memcg
1887  * @nid: the node ID to be checked.
1888  * @noswap : specify true here if the user wants flle only information.
1889  *
1890  * This function returns whether the specified memcg contains any
1891  * reclaimable pages on a node. Returns true if there are any reclaimable
1892  * pages in the node.
1893  */
1894 static bool test_mem_cgroup_node_reclaimable(struct mem_cgroup *memcg,
1895 		int nid, bool noswap)
1896 {
1897 	if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_FILE))
1898 		return true;
1899 	if (noswap || !total_swap_pages)
1900 		return false;
1901 	if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_ANON))
1902 		return true;
1903 	return false;
1904 
1905 }
1906 #if MAX_NUMNODES > 1
1907 
1908 /*
1909  * Always updating the nodemask is not very good - even if we have an empty
1910  * list or the wrong list here, we can start from some node and traverse all
1911  * nodes based on the zonelist. So update the list loosely once per 10 secs.
1912  *
1913  */
1914 static void mem_cgroup_may_update_nodemask(struct mem_cgroup *memcg)
1915 {
1916 	int nid;
1917 	/*
1918 	 * numainfo_events > 0 means there was at least NUMAINFO_EVENTS_TARGET
1919 	 * pagein/pageout changes since the last update.
1920 	 */
1921 	if (!atomic_read(&memcg->numainfo_events))
1922 		return;
1923 	if (atomic_inc_return(&memcg->numainfo_updating) > 1)
1924 		return;
1925 
1926 	/* make a nodemask where this memcg uses memory from */
1927 	memcg->scan_nodes = node_states[N_MEMORY];
1928 
1929 	for_each_node_mask(nid, node_states[N_MEMORY]) {
1930 
1931 		if (!test_mem_cgroup_node_reclaimable(memcg, nid, false))
1932 			node_clear(nid, memcg->scan_nodes);
1933 	}
1934 
1935 	atomic_set(&memcg->numainfo_events, 0);
1936 	atomic_set(&memcg->numainfo_updating, 0);
1937 }
1938 
1939 /*
1940  * Selecting a node where we start reclaim from. Because what we need is just
1941  * reducing usage counter, start from anywhere is O,K. Considering
1942  * memory reclaim from current node, there are pros. and cons.
1943  *
1944  * Freeing memory from current node means freeing memory from a node which
1945  * we'll use or we've used. So, it may make LRU bad. And if several threads
1946  * hit limits, it will see a contention on a node. But freeing from remote
1947  * node means more costs for memory reclaim because of memory latency.
1948  *
1949  * Now, we use round-robin. Better algorithm is welcomed.
1950  */
1951 int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
1952 {
1953 	int node;
1954 
1955 	mem_cgroup_may_update_nodemask(memcg);
1956 	node = memcg->last_scanned_node;
1957 
1958 	node = next_node(node, memcg->scan_nodes);
1959 	if (node == MAX_NUMNODES)
1960 		node = first_node(memcg->scan_nodes);
1961 	/*
1962 	 * We call this when we hit limit, not when pages are added to LRU.
1963 	 * No LRU may hold pages because all pages are UNEVICTABLE or
1964 	 * memcg is too small and all pages are not on LRU. In that case,
1965 	 * we use curret node.
1966 	 */
1967 	if (unlikely(node == MAX_NUMNODES))
1968 		node = numa_node_id();
1969 
1970 	memcg->last_scanned_node = node;
1971 	return node;
1972 }
1973 
1974 /*
1975  * Check all nodes whether it contains reclaimable pages or not.
1976  * For quick scan, we make use of scan_nodes. This will allow us to skip
1977  * unused nodes. But scan_nodes is lazily updated and may not cotain
1978  * enough new information. We need to do double check.
1979  */
1980 static bool mem_cgroup_reclaimable(struct mem_cgroup *memcg, bool noswap)
1981 {
1982 	int nid;
1983 
1984 	/*
1985 	 * quick check...making use of scan_node.
1986 	 * We can skip unused nodes.
1987 	 */
1988 	if (!nodes_empty(memcg->scan_nodes)) {
1989 		for (nid = first_node(memcg->scan_nodes);
1990 		     nid < MAX_NUMNODES;
1991 		     nid = next_node(nid, memcg->scan_nodes)) {
1992 
1993 			if (test_mem_cgroup_node_reclaimable(memcg, nid, noswap))
1994 				return true;
1995 		}
1996 	}
1997 	/*
1998 	 * Check rest of nodes.
1999 	 */
2000 	for_each_node_state(nid, N_MEMORY) {
2001 		if (node_isset(nid, memcg->scan_nodes))
2002 			continue;
2003 		if (test_mem_cgroup_node_reclaimable(memcg, nid, noswap))
2004 			return true;
2005 	}
2006 	return false;
2007 }
2008 
2009 #else
2010 int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
2011 {
2012 	return 0;
2013 }
2014 
2015 static bool mem_cgroup_reclaimable(struct mem_cgroup *memcg, bool noswap)
2016 {
2017 	return test_mem_cgroup_node_reclaimable(memcg, 0, noswap);
2018 }
2019 #endif
2020 
2021 static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
2022 				   struct zone *zone,
2023 				   gfp_t gfp_mask,
2024 				   unsigned long *total_scanned)
2025 {
2026 	struct mem_cgroup *victim = NULL;
2027 	int total = 0;
2028 	int loop = 0;
2029 	unsigned long excess;
2030 	unsigned long nr_scanned;
2031 	struct mem_cgroup_reclaim_cookie reclaim = {
2032 		.zone = zone,
2033 		.priority = 0,
2034 	};
2035 
2036 	excess = res_counter_soft_limit_excess(&root_memcg->res) >> PAGE_SHIFT;
2037 
2038 	while (1) {
2039 		victim = mem_cgroup_iter(root_memcg, victim, &reclaim);
2040 		if (!victim) {
2041 			loop++;
2042 			if (loop >= 2) {
2043 				/*
2044 				 * If we have not been able to reclaim
2045 				 * anything, it might because there are
2046 				 * no reclaimable pages under this hierarchy
2047 				 */
2048 				if (!total)
2049 					break;
2050 				/*
2051 				 * We want to do more targeted reclaim.
2052 				 * excess >> 2 is not to excessive so as to
2053 				 * reclaim too much, nor too less that we keep
2054 				 * coming back to reclaim from this cgroup
2055 				 */
2056 				if (total >= (excess >> 2) ||
2057 					(loop > MEM_CGROUP_MAX_RECLAIM_LOOPS))
2058 					break;
2059 			}
2060 			continue;
2061 		}
2062 		if (!mem_cgroup_reclaimable(victim, false))
2063 			continue;
2064 		total += mem_cgroup_shrink_node_zone(victim, gfp_mask, false,
2065 						     zone, &nr_scanned);
2066 		*total_scanned += nr_scanned;
2067 		if (!res_counter_soft_limit_excess(&root_memcg->res))
2068 			break;
2069 	}
2070 	mem_cgroup_iter_break(root_memcg, victim);
2071 	return total;
2072 }
2073 
2074 #ifdef CONFIG_LOCKDEP
2075 static struct lockdep_map memcg_oom_lock_dep_map = {
2076 	.name = "memcg_oom_lock",
2077 };
2078 #endif
2079 
2080 static DEFINE_SPINLOCK(memcg_oom_lock);
2081 
2082 /*
2083  * Check OOM-Killer is already running under our hierarchy.
2084  * If someone is running, return false.
2085  */
2086 static bool mem_cgroup_oom_trylock(struct mem_cgroup *memcg)
2087 {
2088 	struct mem_cgroup *iter, *failed = NULL;
2089 
2090 	spin_lock(&memcg_oom_lock);
2091 
2092 	for_each_mem_cgroup_tree(iter, memcg) {
2093 		if (iter->oom_lock) {
2094 			/*
2095 			 * this subtree of our hierarchy is already locked
2096 			 * so we cannot give a lock.
2097 			 */
2098 			failed = iter;
2099 			mem_cgroup_iter_break(memcg, iter);
2100 			break;
2101 		} else
2102 			iter->oom_lock = true;
2103 	}
2104 
2105 	if (failed) {
2106 		/*
2107 		 * OK, we failed to lock the whole subtree so we have
2108 		 * to clean up what we set up to the failing subtree
2109 		 */
2110 		for_each_mem_cgroup_tree(iter, memcg) {
2111 			if (iter == failed) {
2112 				mem_cgroup_iter_break(memcg, iter);
2113 				break;
2114 			}
2115 			iter->oom_lock = false;
2116 		}
2117 	} else
2118 		mutex_acquire(&memcg_oom_lock_dep_map, 0, 1, _RET_IP_);
2119 
2120 	spin_unlock(&memcg_oom_lock);
2121 
2122 	return !failed;
2123 }
2124 
2125 static void mem_cgroup_oom_unlock(struct mem_cgroup *memcg)
2126 {
2127 	struct mem_cgroup *iter;
2128 
2129 	spin_lock(&memcg_oom_lock);
2130 	mutex_release(&memcg_oom_lock_dep_map, 1, _RET_IP_);
2131 	for_each_mem_cgroup_tree(iter, memcg)
2132 		iter->oom_lock = false;
2133 	spin_unlock(&memcg_oom_lock);
2134 }
2135 
2136 static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg)
2137 {
2138 	struct mem_cgroup *iter;
2139 
2140 	for_each_mem_cgroup_tree(iter, memcg)
2141 		atomic_inc(&iter->under_oom);
2142 }
2143 
2144 static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg)
2145 {
2146 	struct mem_cgroup *iter;
2147 
2148 	/*
2149 	 * When a new child is created while the hierarchy is under oom,
2150 	 * mem_cgroup_oom_lock() may not be called. We have to use
2151 	 * atomic_add_unless() here.
2152 	 */
2153 	for_each_mem_cgroup_tree(iter, memcg)
2154 		atomic_add_unless(&iter->under_oom, -1, 0);
2155 }
2156 
2157 static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);
2158 
2159 struct oom_wait_info {
2160 	struct mem_cgroup *memcg;
2161 	wait_queue_t	wait;
2162 };
2163 
2164 static int memcg_oom_wake_function(wait_queue_t *wait,
2165 	unsigned mode, int sync, void *arg)
2166 {
2167 	struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg;
2168 	struct mem_cgroup *oom_wait_memcg;
2169 	struct oom_wait_info *oom_wait_info;
2170 
2171 	oom_wait_info = container_of(wait, struct oom_wait_info, wait);
2172 	oom_wait_memcg = oom_wait_info->memcg;
2173 
2174 	/*
2175 	 * Both of oom_wait_info->memcg and wake_memcg are stable under us.
2176 	 * Then we can use css_is_ancestor without taking care of RCU.
2177 	 */
2178 	if (!mem_cgroup_same_or_subtree(oom_wait_memcg, wake_memcg)
2179 		&& !mem_cgroup_same_or_subtree(wake_memcg, oom_wait_memcg))
2180 		return 0;
2181 	return autoremove_wake_function(wait, mode, sync, arg);
2182 }
2183 
2184 static void memcg_wakeup_oom(struct mem_cgroup *memcg)
2185 {
2186 	atomic_inc(&memcg->oom_wakeups);
2187 	/* for filtering, pass "memcg" as argument. */
2188 	__wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg);
2189 }
2190 
2191 static void memcg_oom_recover(struct mem_cgroup *memcg)
2192 {
2193 	if (memcg && atomic_read(&memcg->under_oom))
2194 		memcg_wakeup_oom(memcg);
2195 }
2196 
2197 static void mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
2198 {
2199 	if (!current->memcg_oom.may_oom)
2200 		return;
2201 	/*
2202 	 * We are in the middle of the charge context here, so we
2203 	 * don't want to block when potentially sitting on a callstack
2204 	 * that holds all kinds of filesystem and mm locks.
2205 	 *
2206 	 * Also, the caller may handle a failed allocation gracefully
2207 	 * (like optional page cache readahead) and so an OOM killer
2208 	 * invocation might not even be necessary.
2209 	 *
2210 	 * That's why we don't do anything here except remember the
2211 	 * OOM context and then deal with it at the end of the page
2212 	 * fault when the stack is unwound, the locks are released,
2213 	 * and when we know whether the fault was overall successful.
2214 	 */
2215 	css_get(&memcg->css);
2216 	current->memcg_oom.memcg = memcg;
2217 	current->memcg_oom.gfp_mask = mask;
2218 	current->memcg_oom.order = order;
2219 }
2220 
2221 /**
2222  * mem_cgroup_oom_synchronize - complete memcg OOM handling
2223  * @handle: actually kill/wait or just clean up the OOM state
2224  *
2225  * This has to be called at the end of a page fault if the memcg OOM
2226  * handler was enabled.
2227  *
2228  * Memcg supports userspace OOM handling where failed allocations must
2229  * sleep on a waitqueue until the userspace task resolves the
2230  * situation.  Sleeping directly in the charge context with all kinds
2231  * of locks held is not a good idea, instead we remember an OOM state
2232  * in the task and mem_cgroup_oom_synchronize() has to be called at
2233  * the end of the page fault to complete the OOM handling.
2234  *
2235  * Returns %true if an ongoing memcg OOM situation was detected and
2236  * completed, %false otherwise.
2237  */
2238 bool mem_cgroup_oom_synchronize(bool handle)
2239 {
2240 	struct mem_cgroup *memcg = current->memcg_oom.memcg;
2241 	struct oom_wait_info owait;
2242 	bool locked;
2243 
2244 	/* OOM is global, do not handle */
2245 	if (!memcg)
2246 		return false;
2247 
2248 	if (!handle)
2249 		goto cleanup;
2250 
2251 	owait.memcg = memcg;
2252 	owait.wait.flags = 0;
2253 	owait.wait.func = memcg_oom_wake_function;
2254 	owait.wait.private = current;
2255 	INIT_LIST_HEAD(&owait.wait.task_list);
2256 
2257 	prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
2258 	mem_cgroup_mark_under_oom(memcg);
2259 
2260 	locked = mem_cgroup_oom_trylock(memcg);
2261 
2262 	if (locked)
2263 		mem_cgroup_oom_notify(memcg);
2264 
2265 	if (locked && !memcg->oom_kill_disable) {
2266 		mem_cgroup_unmark_under_oom(memcg);
2267 		finish_wait(&memcg_oom_waitq, &owait.wait);
2268 		mem_cgroup_out_of_memory(memcg, current->memcg_oom.gfp_mask,
2269 					 current->memcg_oom.order);
2270 	} else {
2271 		schedule();
2272 		mem_cgroup_unmark_under_oom(memcg);
2273 		finish_wait(&memcg_oom_waitq, &owait.wait);
2274 	}
2275 
2276 	if (locked) {
2277 		mem_cgroup_oom_unlock(memcg);
2278 		/*
2279 		 * There is no guarantee that an OOM-lock contender
2280 		 * sees the wakeups triggered by the OOM kill
2281 		 * uncharges.  Wake any sleepers explicitely.
2282 		 */
2283 		memcg_oom_recover(memcg);
2284 	}
2285 cleanup:
2286 	current->memcg_oom.memcg = NULL;
2287 	css_put(&memcg->css);
2288 	return true;
2289 }
2290 
2291 /*
2292  * Currently used to update mapped file statistics, but the routine can be
2293  * generalized to update other statistics as well.
2294  *
2295  * Notes: Race condition
2296  *
2297  * We usually use page_cgroup_lock() for accessing page_cgroup member but
2298  * it tends to be costly. But considering some conditions, we doesn't need
2299  * to do so _always_.
2300  *
2301  * Considering "charge", lock_page_cgroup() is not required because all
2302  * file-stat operations happen after a page is attached to radix-tree. There
2303  * are no race with "charge".
2304  *
2305  * Considering "uncharge", we know that memcg doesn't clear pc->mem_cgroup
2306  * at "uncharge" intentionally. So, we always see valid pc->mem_cgroup even
2307  * if there are race with "uncharge". Statistics itself is properly handled
2308  * by flags.
2309  *
2310  * Considering "move", this is an only case we see a race. To make the race
2311  * small, we check mm->moving_account and detect there are possibility of race
2312  * If there is, we take a lock.
2313  */
2314 
2315 void __mem_cgroup_begin_update_page_stat(struct page *page,
2316 				bool *locked, unsigned long *flags)
2317 {
2318 	struct mem_cgroup *memcg;
2319 	struct page_cgroup *pc;
2320 
2321 	pc = lookup_page_cgroup(page);
2322 again:
2323 	memcg = pc->mem_cgroup;
2324 	if (unlikely(!memcg || !PageCgroupUsed(pc)))
2325 		return;
2326 	/*
2327 	 * If this memory cgroup is not under account moving, we don't
2328 	 * need to take move_lock_mem_cgroup(). Because we already hold
2329 	 * rcu_read_lock(), any calls to move_account will be delayed until
2330 	 * rcu_read_unlock() if mem_cgroup_stolen() == true.
2331 	 */
2332 	if (!mem_cgroup_stolen(memcg))
2333 		return;
2334 
2335 	move_lock_mem_cgroup(memcg, flags);
2336 	if (memcg != pc->mem_cgroup || !PageCgroupUsed(pc)) {
2337 		move_unlock_mem_cgroup(memcg, flags);
2338 		goto again;
2339 	}
2340 	*locked = true;
2341 }
2342 
2343 void __mem_cgroup_end_update_page_stat(struct page *page, unsigned long *flags)
2344 {
2345 	struct page_cgroup *pc = lookup_page_cgroup(page);
2346 
2347 	/*
2348 	 * It's guaranteed that pc->mem_cgroup never changes while
2349 	 * lock is held because a routine modifies pc->mem_cgroup
2350 	 * should take move_lock_mem_cgroup().
2351 	 */
2352 	move_unlock_mem_cgroup(pc->mem_cgroup, flags);
2353 }
2354 
2355 void mem_cgroup_update_page_stat(struct page *page,
2356 				 enum mem_cgroup_stat_index idx, int val)
2357 {
2358 	struct mem_cgroup *memcg;
2359 	struct page_cgroup *pc = lookup_page_cgroup(page);
2360 	unsigned long uninitialized_var(flags);
2361 
2362 	if (mem_cgroup_disabled())
2363 		return;
2364 
2365 	VM_BUG_ON(!rcu_read_lock_held());
2366 	memcg = pc->mem_cgroup;
2367 	if (unlikely(!memcg || !PageCgroupUsed(pc)))
2368 		return;
2369 
2370 	this_cpu_add(memcg->stat->count[idx], val);
2371 }
2372 
2373 /*
2374  * size of first charge trial. "32" comes from vmscan.c's magic value.
2375  * TODO: maybe necessary to use big numbers in big irons.
2376  */
2377 #define CHARGE_BATCH	32U
2378 struct memcg_stock_pcp {
2379 	struct mem_cgroup *cached; /* this never be root cgroup */
2380 	unsigned int nr_pages;
2381 	struct work_struct work;
2382 	unsigned long flags;
2383 #define FLUSHING_CACHED_CHARGE	0
2384 };
2385 static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
2386 static DEFINE_MUTEX(percpu_charge_mutex);
2387 
2388 /**
2389  * consume_stock: Try to consume stocked charge on this cpu.
2390  * @memcg: memcg to consume from.
2391  * @nr_pages: how many pages to charge.
2392  *
2393  * The charges will only happen if @memcg matches the current cpu's memcg
2394  * stock, and at least @nr_pages are available in that stock.  Failure to
2395  * service an allocation will refill the stock.
2396  *
2397  * returns true if successful, false otherwise.
2398  */
2399 static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
2400 {
2401 	struct memcg_stock_pcp *stock;
2402 	bool ret = true;
2403 
2404 	if (nr_pages > CHARGE_BATCH)
2405 		return false;
2406 
2407 	stock = &get_cpu_var(memcg_stock);
2408 	if (memcg == stock->cached && stock->nr_pages >= nr_pages)
2409 		stock->nr_pages -= nr_pages;
2410 	else /* need to call res_counter_charge */
2411 		ret = false;
2412 	put_cpu_var(memcg_stock);
2413 	return ret;
2414 }
2415 
2416 /*
2417  * Returns stocks cached in percpu to res_counter and reset cached information.
2418  */
2419 static void drain_stock(struct memcg_stock_pcp *stock)
2420 {
2421 	struct mem_cgroup *old = stock->cached;
2422 
2423 	if (stock->nr_pages) {
2424 		unsigned long bytes = stock->nr_pages * PAGE_SIZE;
2425 
2426 		res_counter_uncharge(&old->res, bytes);
2427 		if (do_swap_account)
2428 			res_counter_uncharge(&old->memsw, bytes);
2429 		stock->nr_pages = 0;
2430 	}
2431 	stock->cached = NULL;
2432 }
2433 
2434 /*
2435  * This must be called under preempt disabled or must be called by
2436  * a thread which is pinned to local cpu.
2437  */
2438 static void drain_local_stock(struct work_struct *dummy)
2439 {
2440 	struct memcg_stock_pcp *stock = &__get_cpu_var(memcg_stock);
2441 	drain_stock(stock);
2442 	clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
2443 }
2444 
2445 static void __init memcg_stock_init(void)
2446 {
2447 	int cpu;
2448 
2449 	for_each_possible_cpu(cpu) {
2450 		struct memcg_stock_pcp *stock =
2451 					&per_cpu(memcg_stock, cpu);
2452 		INIT_WORK(&stock->work, drain_local_stock);
2453 	}
2454 }
2455 
2456 /*
2457  * Cache charges(val) which is from res_counter, to local per_cpu area.
2458  * This will be consumed by consume_stock() function, later.
2459  */
2460 static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
2461 {
2462 	struct memcg_stock_pcp *stock = &get_cpu_var(memcg_stock);
2463 
2464 	if (stock->cached != memcg) { /* reset if necessary */
2465 		drain_stock(stock);
2466 		stock->cached = memcg;
2467 	}
2468 	stock->nr_pages += nr_pages;
2469 	put_cpu_var(memcg_stock);
2470 }
2471 
2472 /*
2473  * Drains all per-CPU charge caches for given root_memcg resp. subtree
2474  * of the hierarchy under it. sync flag says whether we should block
2475  * until the work is done.
2476  */
2477 static void drain_all_stock(struct mem_cgroup *root_memcg, bool sync)
2478 {
2479 	int cpu, curcpu;
2480 
2481 	/* Notify other cpus that system-wide "drain" is running */
2482 	get_online_cpus();
2483 	curcpu = get_cpu();
2484 	for_each_online_cpu(cpu) {
2485 		struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
2486 		struct mem_cgroup *memcg;
2487 
2488 		memcg = stock->cached;
2489 		if (!memcg || !stock->nr_pages)
2490 			continue;
2491 		if (!mem_cgroup_same_or_subtree(root_memcg, memcg))
2492 			continue;
2493 		if (!test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
2494 			if (cpu == curcpu)
2495 				drain_local_stock(&stock->work);
2496 			else
2497 				schedule_work_on(cpu, &stock->work);
2498 		}
2499 	}
2500 	put_cpu();
2501 
2502 	if (!sync)
2503 		goto out;
2504 
2505 	for_each_online_cpu(cpu) {
2506 		struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
2507 		if (test_bit(FLUSHING_CACHED_CHARGE, &stock->flags))
2508 			flush_work(&stock->work);
2509 	}
2510 out:
2511 	put_online_cpus();
2512 }
2513 
2514 /*
2515  * Tries to drain stocked charges in other cpus. This function is asynchronous
2516  * and just put a work per cpu for draining localy on each cpu. Caller can
2517  * expects some charges will be back to res_counter later but cannot wait for
2518  * it.
2519  */
2520 static void drain_all_stock_async(struct mem_cgroup *root_memcg)
2521 {
2522 	/*
2523 	 * If someone calls draining, avoid adding more kworker runs.
2524 	 */
2525 	if (!mutex_trylock(&percpu_charge_mutex))
2526 		return;
2527 	drain_all_stock(root_memcg, false);
2528 	mutex_unlock(&percpu_charge_mutex);
2529 }
2530 
2531 /* This is a synchronous drain interface. */
2532 static void drain_all_stock_sync(struct mem_cgroup *root_memcg)
2533 {
2534 	/* called when force_empty is called */
2535 	mutex_lock(&percpu_charge_mutex);
2536 	drain_all_stock(root_memcg, true);
2537 	mutex_unlock(&percpu_charge_mutex);
2538 }
2539 
2540 /*
2541  * This function drains percpu counter value from DEAD cpu and
2542  * move it to local cpu. Note that this function can be preempted.
2543  */
2544 static void mem_cgroup_drain_pcp_counter(struct mem_cgroup *memcg, int cpu)
2545 {
2546 	int i;
2547 
2548 	spin_lock(&memcg->pcp_counter_lock);
2549 	for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
2550 		long x = per_cpu(memcg->stat->count[i], cpu);
2551 
2552 		per_cpu(memcg->stat->count[i], cpu) = 0;
2553 		memcg->nocpu_base.count[i] += x;
2554 	}
2555 	for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) {
2556 		unsigned long x = per_cpu(memcg->stat->events[i], cpu);
2557 
2558 		per_cpu(memcg->stat->events[i], cpu) = 0;
2559 		memcg->nocpu_base.events[i] += x;
2560 	}
2561 	spin_unlock(&memcg->pcp_counter_lock);
2562 }
2563 
2564 static int memcg_cpu_hotplug_callback(struct notifier_block *nb,
2565 					unsigned long action,
2566 					void *hcpu)
2567 {
2568 	int cpu = (unsigned long)hcpu;
2569 	struct memcg_stock_pcp *stock;
2570 	struct mem_cgroup *iter;
2571 
2572 	if (action == CPU_ONLINE)
2573 		return NOTIFY_OK;
2574 
2575 	if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
2576 		return NOTIFY_OK;
2577 
2578 	for_each_mem_cgroup(iter)
2579 		mem_cgroup_drain_pcp_counter(iter, cpu);
2580 
2581 	stock = &per_cpu(memcg_stock, cpu);
2582 	drain_stock(stock);
2583 	return NOTIFY_OK;
2584 }
2585 
2586 
2587 /* See mem_cgroup_try_charge() for details */
2588 enum {
2589 	CHARGE_OK,		/* success */
2590 	CHARGE_RETRY,		/* need to retry but retry is not bad */
2591 	CHARGE_NOMEM,		/* we can't do more. return -ENOMEM */
2592 	CHARGE_WOULDBLOCK,	/* GFP_WAIT wasn't set and no enough res. */
2593 };
2594 
2595 static int mem_cgroup_do_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
2596 				unsigned int nr_pages, unsigned int min_pages,
2597 				bool invoke_oom)
2598 {
2599 	unsigned long csize = nr_pages * PAGE_SIZE;
2600 	struct mem_cgroup *mem_over_limit;
2601 	struct res_counter *fail_res;
2602 	unsigned long flags = 0;
2603 	int ret;
2604 
2605 	ret = res_counter_charge(&memcg->res, csize, &fail_res);
2606 
2607 	if (likely(!ret)) {
2608 		if (!do_swap_account)
2609 			return CHARGE_OK;
2610 		ret = res_counter_charge(&memcg->memsw, csize, &fail_res);
2611 		if (likely(!ret))
2612 			return CHARGE_OK;
2613 
2614 		res_counter_uncharge(&memcg->res, csize);
2615 		mem_over_limit = mem_cgroup_from_res_counter(fail_res, memsw);
2616 		flags |= MEM_CGROUP_RECLAIM_NOSWAP;
2617 	} else
2618 		mem_over_limit = mem_cgroup_from_res_counter(fail_res, res);
2619 	/*
2620 	 * Never reclaim on behalf of optional batching, retry with a
2621 	 * single page instead.
2622 	 */
2623 	if (nr_pages > min_pages)
2624 		return CHARGE_RETRY;
2625 
2626 	if (!(gfp_mask & __GFP_WAIT))
2627 		return CHARGE_WOULDBLOCK;
2628 
2629 	if (gfp_mask & __GFP_NORETRY)
2630 		return CHARGE_NOMEM;
2631 
2632 	ret = mem_cgroup_reclaim(mem_over_limit, gfp_mask, flags);
2633 	if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
2634 		return CHARGE_RETRY;
2635 	/*
2636 	 * Even though the limit is exceeded at this point, reclaim
2637 	 * may have been able to free some pages.  Retry the charge
2638 	 * before killing the task.
2639 	 *
2640 	 * Only for regular pages, though: huge pages are rather
2641 	 * unlikely to succeed so close to the limit, and we fall back
2642 	 * to regular pages anyway in case of failure.
2643 	 */
2644 	if (nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER) && ret)
2645 		return CHARGE_RETRY;
2646 
2647 	/*
2648 	 * At task move, charge accounts can be doubly counted. So, it's
2649 	 * better to wait until the end of task_move if something is going on.
2650 	 */
2651 	if (mem_cgroup_wait_acct_move(mem_over_limit))
2652 		return CHARGE_RETRY;
2653 
2654 	if (invoke_oom)
2655 		mem_cgroup_oom(mem_over_limit, gfp_mask, get_order(csize));
2656 
2657 	return CHARGE_NOMEM;
2658 }
2659 
2660 /**
2661  * mem_cgroup_try_charge - try charging a memcg
2662  * @memcg: memcg to charge
2663  * @nr_pages: number of pages to charge
2664  * @oom: trigger OOM if reclaim fails
2665  *
2666  * Returns 0 if @memcg was charged successfully, -EINTR if the charge
2667  * was bypassed to root_mem_cgroup, and -ENOMEM if the charge failed.
2668  */
2669 static int mem_cgroup_try_charge(struct mem_cgroup *memcg,
2670 				 gfp_t gfp_mask,
2671 				 unsigned int nr_pages,
2672 				 bool oom)
2673 {
2674 	unsigned int batch = max(CHARGE_BATCH, nr_pages);
2675 	int nr_oom_retries = MEM_CGROUP_RECLAIM_RETRIES;
2676 	int ret;
2677 
2678 	if (mem_cgroup_is_root(memcg))
2679 		goto done;
2680 	/*
2681 	 * Unlike in global OOM situations, memcg is not in a physical
2682 	 * memory shortage.  Allow dying and OOM-killed tasks to
2683 	 * bypass the last charges so that they can exit quickly and
2684 	 * free their memory.
2685 	 */
2686 	if (unlikely(test_thread_flag(TIF_MEMDIE) ||
2687 		     fatal_signal_pending(current)))
2688 		goto bypass;
2689 
2690 	if (unlikely(task_in_memcg_oom(current)))
2691 		goto nomem;
2692 
2693 	if (gfp_mask & __GFP_NOFAIL)
2694 		oom = false;
2695 again:
2696 	if (consume_stock(memcg, nr_pages))
2697 		goto done;
2698 
2699 	do {
2700 		bool invoke_oom = oom && !nr_oom_retries;
2701 
2702 		/* If killed, bypass charge */
2703 		if (fatal_signal_pending(current))
2704 			goto bypass;
2705 
2706 		ret = mem_cgroup_do_charge(memcg, gfp_mask, batch,
2707 					   nr_pages, invoke_oom);
2708 		switch (ret) {
2709 		case CHARGE_OK:
2710 			break;
2711 		case CHARGE_RETRY: /* not in OOM situation but retry */
2712 			batch = nr_pages;
2713 			goto again;
2714 		case CHARGE_WOULDBLOCK: /* !__GFP_WAIT */
2715 			goto nomem;
2716 		case CHARGE_NOMEM: /* OOM routine works */
2717 			if (!oom || invoke_oom)
2718 				goto nomem;
2719 			nr_oom_retries--;
2720 			break;
2721 		}
2722 	} while (ret != CHARGE_OK);
2723 
2724 	if (batch > nr_pages)
2725 		refill_stock(memcg, batch - nr_pages);
2726 done:
2727 	return 0;
2728 nomem:
2729 	if (!(gfp_mask & __GFP_NOFAIL))
2730 		return -ENOMEM;
2731 bypass:
2732 	return -EINTR;
2733 }
2734 
2735 /**
2736  * mem_cgroup_try_charge_mm - try charging a mm
2737  * @mm: mm_struct to charge
2738  * @nr_pages: number of pages to charge
2739  * @oom: trigger OOM if reclaim fails
2740  *
2741  * Returns the charged mem_cgroup associated with the given mm_struct or
2742  * NULL the charge failed.
2743  */
2744 static struct mem_cgroup *mem_cgroup_try_charge_mm(struct mm_struct *mm,
2745 				 gfp_t gfp_mask,
2746 				 unsigned int nr_pages,
2747 				 bool oom)
2748 
2749 {
2750 	struct mem_cgroup *memcg;
2751 	int ret;
2752 
2753 	memcg = get_mem_cgroup_from_mm(mm);
2754 	ret = mem_cgroup_try_charge(memcg, gfp_mask, nr_pages, oom);
2755 	css_put(&memcg->css);
2756 	if (ret == -EINTR)
2757 		memcg = root_mem_cgroup;
2758 	else if (ret)
2759 		memcg = NULL;
2760 
2761 	return memcg;
2762 }
2763 
2764 /*
2765  * Somemtimes we have to undo a charge we got by try_charge().
2766  * This function is for that and do uncharge, put css's refcnt.
2767  * gotten by try_charge().
2768  */
2769 static void __mem_cgroup_cancel_charge(struct mem_cgroup *memcg,
2770 				       unsigned int nr_pages)
2771 {
2772 	if (!mem_cgroup_is_root(memcg)) {
2773 		unsigned long bytes = nr_pages * PAGE_SIZE;
2774 
2775 		res_counter_uncharge(&memcg->res, bytes);
2776 		if (do_swap_account)
2777 			res_counter_uncharge(&memcg->memsw, bytes);
2778 	}
2779 }
2780 
2781 /*
2782  * Cancel chrages in this cgroup....doesn't propagate to parent cgroup.
2783  * This is useful when moving usage to parent cgroup.
2784  */
2785 static void __mem_cgroup_cancel_local_charge(struct mem_cgroup *memcg,
2786 					unsigned int nr_pages)
2787 {
2788 	unsigned long bytes = nr_pages * PAGE_SIZE;
2789 
2790 	if (mem_cgroup_is_root(memcg))
2791 		return;
2792 
2793 	res_counter_uncharge_until(&memcg->res, memcg->res.parent, bytes);
2794 	if (do_swap_account)
2795 		res_counter_uncharge_until(&memcg->memsw,
2796 						memcg->memsw.parent, bytes);
2797 }
2798 
2799 /*
2800  * A helper function to get mem_cgroup from ID. must be called under
2801  * rcu_read_lock().  The caller is responsible for calling css_tryget if
2802  * the mem_cgroup is used for charging. (dropping refcnt from swap can be
2803  * called against removed memcg.)
2804  */
2805 static struct mem_cgroup *mem_cgroup_lookup(unsigned short id)
2806 {
2807 	/* ID 0 is unused ID */
2808 	if (!id)
2809 		return NULL;
2810 	return mem_cgroup_from_id(id);
2811 }
2812 
2813 struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
2814 {
2815 	struct mem_cgroup *memcg = NULL;
2816 	struct page_cgroup *pc;
2817 	unsigned short id;
2818 	swp_entry_t ent;
2819 
2820 	VM_BUG_ON_PAGE(!PageLocked(page), page);
2821 
2822 	pc = lookup_page_cgroup(page);
2823 	lock_page_cgroup(pc);
2824 	if (PageCgroupUsed(pc)) {
2825 		memcg = pc->mem_cgroup;
2826 		if (memcg && !css_tryget(&memcg->css))
2827 			memcg = NULL;
2828 	} else if (PageSwapCache(page)) {
2829 		ent.val = page_private(page);
2830 		id = lookup_swap_cgroup_id(ent);
2831 		rcu_read_lock();
2832 		memcg = mem_cgroup_lookup(id);
2833 		if (memcg && !css_tryget(&memcg->css))
2834 			memcg = NULL;
2835 		rcu_read_unlock();
2836 	}
2837 	unlock_page_cgroup(pc);
2838 	return memcg;
2839 }
2840 
2841 static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
2842 				       struct page *page,
2843 				       unsigned int nr_pages,
2844 				       enum charge_type ctype,
2845 				       bool lrucare)
2846 {
2847 	struct page_cgroup *pc = lookup_page_cgroup(page);
2848 	struct zone *uninitialized_var(zone);
2849 	struct lruvec *lruvec;
2850 	bool was_on_lru = false;
2851 	bool anon;
2852 
2853 	lock_page_cgroup(pc);
2854 	VM_BUG_ON_PAGE(PageCgroupUsed(pc), page);
2855 	/*
2856 	 * we don't need page_cgroup_lock about tail pages, becase they are not
2857 	 * accessed by any other context at this point.
2858 	 */
2859 
2860 	/*
2861 	 * In some cases, SwapCache and FUSE(splice_buf->radixtree), the page
2862 	 * may already be on some other mem_cgroup's LRU.  Take care of it.
2863 	 */
2864 	if (lrucare) {
2865 		zone = page_zone(page);
2866 		spin_lock_irq(&zone->lru_lock);
2867 		if (PageLRU(page)) {
2868 			lruvec = mem_cgroup_zone_lruvec(zone, pc->mem_cgroup);
2869 			ClearPageLRU(page);
2870 			del_page_from_lru_list(page, lruvec, page_lru(page));
2871 			was_on_lru = true;
2872 		}
2873 	}
2874 
2875 	pc->mem_cgroup = memcg;
2876 	/*
2877 	 * We access a page_cgroup asynchronously without lock_page_cgroup().
2878 	 * Especially when a page_cgroup is taken from a page, pc->mem_cgroup
2879 	 * is accessed after testing USED bit. To make pc->mem_cgroup visible
2880 	 * before USED bit, we need memory barrier here.
2881 	 * See mem_cgroup_add_lru_list(), etc.
2882 	 */
2883 	smp_wmb();
2884 	SetPageCgroupUsed(pc);
2885 
2886 	if (lrucare) {
2887 		if (was_on_lru) {
2888 			lruvec = mem_cgroup_zone_lruvec(zone, pc->mem_cgroup);
2889 			VM_BUG_ON_PAGE(PageLRU(page), page);
2890 			SetPageLRU(page);
2891 			add_page_to_lru_list(page, lruvec, page_lru(page));
2892 		}
2893 		spin_unlock_irq(&zone->lru_lock);
2894 	}
2895 
2896 	if (ctype == MEM_CGROUP_CHARGE_TYPE_ANON)
2897 		anon = true;
2898 	else
2899 		anon = false;
2900 
2901 	mem_cgroup_charge_statistics(memcg, page, anon, nr_pages);
2902 	unlock_page_cgroup(pc);
2903 
2904 	/*
2905 	 * "charge_statistics" updated event counter. Then, check it.
2906 	 * Insert ancestor (and ancestor's ancestors), to softlimit RB-tree.
2907 	 * if they exceeds softlimit.
2908 	 */
2909 	memcg_check_events(memcg, page);
2910 }
2911 
2912 static DEFINE_MUTEX(set_limit_mutex);
2913 
2914 #ifdef CONFIG_MEMCG_KMEM
2915 static DEFINE_MUTEX(activate_kmem_mutex);
2916 
2917 static inline bool memcg_can_account_kmem(struct mem_cgroup *memcg)
2918 {
2919 	return !mem_cgroup_disabled() && !mem_cgroup_is_root(memcg) &&
2920 		memcg_kmem_is_active(memcg);
2921 }
2922 
2923 /*
2924  * This is a bit cumbersome, but it is rarely used and avoids a backpointer
2925  * in the memcg_cache_params struct.
2926  */
2927 static struct kmem_cache *memcg_params_to_cache(struct memcg_cache_params *p)
2928 {
2929 	struct kmem_cache *cachep;
2930 
2931 	VM_BUG_ON(p->is_root_cache);
2932 	cachep = p->root_cache;
2933 	return cache_from_memcg_idx(cachep, memcg_cache_id(p->memcg));
2934 }
2935 
2936 #ifdef CONFIG_SLABINFO
2937 static int mem_cgroup_slabinfo_read(struct seq_file *m, void *v)
2938 {
2939 	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
2940 	struct memcg_cache_params *params;
2941 
2942 	if (!memcg_can_account_kmem(memcg))
2943 		return -EIO;
2944 
2945 	print_slabinfo_header(m);
2946 
2947 	mutex_lock(&memcg->slab_caches_mutex);
2948 	list_for_each_entry(params, &memcg->memcg_slab_caches, list)
2949 		cache_show(memcg_params_to_cache(params), m);
2950 	mutex_unlock(&memcg->slab_caches_mutex);
2951 
2952 	return 0;
2953 }
2954 #endif
2955 
2956 static int memcg_charge_kmem(struct mem_cgroup *memcg, gfp_t gfp, u64 size)
2957 {
2958 	struct res_counter *fail_res;
2959 	int ret = 0;
2960 
2961 	ret = res_counter_charge(&memcg->kmem, size, &fail_res);
2962 	if (ret)
2963 		return ret;
2964 
2965 	ret = mem_cgroup_try_charge(memcg, gfp, size >> PAGE_SHIFT,
2966 				    oom_gfp_allowed(gfp));
2967 	if (ret == -EINTR)  {
2968 		/*
2969 		 * mem_cgroup_try_charge() chosed to bypass to root due to
2970 		 * OOM kill or fatal signal.  Since our only options are to
2971 		 * either fail the allocation or charge it to this cgroup, do
2972 		 * it as a temporary condition. But we can't fail. From a
2973 		 * kmem/slab perspective, the cache has already been selected,
2974 		 * by mem_cgroup_kmem_get_cache(), so it is too late to change
2975 		 * our minds.
2976 		 *
2977 		 * This condition will only trigger if the task entered
2978 		 * memcg_charge_kmem in a sane state, but was OOM-killed during
2979 		 * mem_cgroup_try_charge() above. Tasks that were already
2980 		 * dying when the allocation triggers should have been already
2981 		 * directed to the root cgroup in memcontrol.h
2982 		 */
2983 		res_counter_charge_nofail(&memcg->res, size, &fail_res);
2984 		if (do_swap_account)
2985 			res_counter_charge_nofail(&memcg->memsw, size,
2986 						  &fail_res);
2987 		ret = 0;
2988 	} else if (ret)
2989 		res_counter_uncharge(&memcg->kmem, size);
2990 
2991 	return ret;
2992 }
2993 
2994 static void memcg_uncharge_kmem(struct mem_cgroup *memcg, u64 size)
2995 {
2996 	res_counter_uncharge(&memcg->res, size);
2997 	if (do_swap_account)
2998 		res_counter_uncharge(&memcg->memsw, size);
2999 
3000 	/* Not down to 0 */
3001 	if (res_counter_uncharge(&memcg->kmem, size))
3002 		return;
3003 
3004 	/*
3005 	 * Releases a reference taken in kmem_cgroup_css_offline in case
3006 	 * this last uncharge is racing with the offlining code or it is
3007 	 * outliving the memcg existence.
3008 	 *
3009 	 * The memory barrier imposed by test&clear is paired with the
3010 	 * explicit one in memcg_kmem_mark_dead().
3011 	 */
3012 	if (memcg_kmem_test_and_clear_dead(memcg))
3013 		css_put(&memcg->css);
3014 }
3015 
3016 /*
3017  * helper for acessing a memcg's index. It will be used as an index in the
3018  * child cache array in kmem_cache, and also to derive its name. This function
3019  * will return -1 when this is not a kmem-limited memcg.
3020  */
3021 int memcg_cache_id(struct mem_cgroup *memcg)
3022 {
3023 	return memcg ? memcg->kmemcg_id : -1;
3024 }
3025 
3026 static size_t memcg_caches_array_size(int num_groups)
3027 {
3028 	ssize_t size;
3029 	if (num_groups <= 0)
3030 		return 0;
3031 
3032 	size = 2 * num_groups;
3033 	if (size < MEMCG_CACHES_MIN_SIZE)
3034 		size = MEMCG_CACHES_MIN_SIZE;
3035 	else if (size > MEMCG_CACHES_MAX_SIZE)
3036 		size = MEMCG_CACHES_MAX_SIZE;
3037 
3038 	return size;
3039 }
3040 
3041 /*
3042  * We should update the current array size iff all caches updates succeed. This
3043  * can only be done from the slab side. The slab mutex needs to be held when
3044  * calling this.
3045  */
3046 void memcg_update_array_size(int num)
3047 {
3048 	if (num > memcg_limited_groups_array_size)
3049 		memcg_limited_groups_array_size = memcg_caches_array_size(num);
3050 }
3051 
3052 static void kmem_cache_destroy_work_func(struct work_struct *w);
3053 
3054 int memcg_update_cache_size(struct kmem_cache *s, int num_groups)
3055 {
3056 	struct memcg_cache_params *cur_params = s->memcg_params;
3057 
3058 	VM_BUG_ON(!is_root_cache(s));
3059 
3060 	if (num_groups > memcg_limited_groups_array_size) {
3061 		int i;
3062 		struct memcg_cache_params *new_params;
3063 		ssize_t size = memcg_caches_array_size(num_groups);
3064 
3065 		size *= sizeof(void *);
3066 		size += offsetof(struct memcg_cache_params, memcg_caches);
3067 
3068 		new_params = kzalloc(size, GFP_KERNEL);
3069 		if (!new_params)
3070 			return -ENOMEM;
3071 
3072 		new_params->is_root_cache = true;
3073 
3074 		/*
3075 		 * There is the chance it will be bigger than
3076 		 * memcg_limited_groups_array_size, if we failed an allocation
3077 		 * in a cache, in which case all caches updated before it, will
3078 		 * have a bigger array.
3079 		 *
3080 		 * But if that is the case, the data after
3081 		 * memcg_limited_groups_array_size is certainly unused
3082 		 */
3083 		for (i = 0; i < memcg_limited_groups_array_size; i++) {
3084 			if (!cur_params->memcg_caches[i])
3085 				continue;
3086 			new_params->memcg_caches[i] =
3087 						cur_params->memcg_caches[i];
3088 		}
3089 
3090 		/*
3091 		 * Ideally, we would wait until all caches succeed, and only
3092 		 * then free the old one. But this is not worth the extra
3093 		 * pointer per-cache we'd have to have for this.
3094 		 *
3095 		 * It is not a big deal if some caches are left with a size
3096 		 * bigger than the others. And all updates will reset this
3097 		 * anyway.
3098 		 */
3099 		rcu_assign_pointer(s->memcg_params, new_params);
3100 		if (cur_params)
3101 			kfree_rcu(cur_params, rcu_head);
3102 	}
3103 	return 0;
3104 }
3105 
3106 char *memcg_create_cache_name(struct mem_cgroup *memcg,
3107 			      struct kmem_cache *root_cache)
3108 {
3109 	static char *buf = NULL;
3110 
3111 	/*
3112 	 * We need a mutex here to protect the shared buffer. Since this is
3113 	 * expected to be called only on cache creation, we can employ the
3114 	 * slab_mutex for that purpose.
3115 	 */
3116 	lockdep_assert_held(&slab_mutex);
3117 
3118 	if (!buf) {
3119 		buf = kmalloc(NAME_MAX + 1, GFP_KERNEL);
3120 		if (!buf)
3121 			return NULL;
3122 	}
3123 
3124 	cgroup_name(memcg->css.cgroup, buf, NAME_MAX + 1);
3125 	return kasprintf(GFP_KERNEL, "%s(%d:%s)", root_cache->name,
3126 			 memcg_cache_id(memcg), buf);
3127 }
3128 
3129 int memcg_alloc_cache_params(struct mem_cgroup *memcg, struct kmem_cache *s,
3130 			     struct kmem_cache *root_cache)
3131 {
3132 	size_t size;
3133 
3134 	if (!memcg_kmem_enabled())
3135 		return 0;
3136 
3137 	if (!memcg) {
3138 		size = offsetof(struct memcg_cache_params, memcg_caches);
3139 		size += memcg_limited_groups_array_size * sizeof(void *);
3140 	} else
3141 		size = sizeof(struct memcg_cache_params);
3142 
3143 	s->memcg_params = kzalloc(size, GFP_KERNEL);
3144 	if (!s->memcg_params)
3145 		return -ENOMEM;
3146 
3147 	if (memcg) {
3148 		s->memcg_params->memcg = memcg;
3149 		s->memcg_params->root_cache = root_cache;
3150 		INIT_WORK(&s->memcg_params->destroy,
3151 				kmem_cache_destroy_work_func);
3152 		css_get(&memcg->css);
3153 	} else
3154 		s->memcg_params->is_root_cache = true;
3155 
3156 	return 0;
3157 }
3158 
3159 void memcg_free_cache_params(struct kmem_cache *s)
3160 {
3161 	if (!s->memcg_params)
3162 		return;
3163 	if (!s->memcg_params->is_root_cache)
3164 		css_put(&s->memcg_params->memcg->css);
3165 	kfree(s->memcg_params);
3166 }
3167 
3168 void memcg_register_cache(struct kmem_cache *s)
3169 {
3170 	struct kmem_cache *root;
3171 	struct mem_cgroup *memcg;
3172 	int id;
3173 
3174 	if (is_root_cache(s))
3175 		return;
3176 
3177 	/*
3178 	 * Holding the slab_mutex assures nobody will touch the memcg_caches
3179 	 * array while we are modifying it.
3180 	 */
3181 	lockdep_assert_held(&slab_mutex);
3182 
3183 	root = s->memcg_params->root_cache;
3184 	memcg = s->memcg_params->memcg;
3185 	id = memcg_cache_id(memcg);
3186 
3187 	/*
3188 	 * Since readers won't lock (see cache_from_memcg_idx()), we need a
3189 	 * barrier here to ensure nobody will see the kmem_cache partially
3190 	 * initialized.
3191 	 */
3192 	smp_wmb();
3193 
3194 	/*
3195 	 * Initialize the pointer to this cache in its parent's memcg_params
3196 	 * before adding it to the memcg_slab_caches list, otherwise we can
3197 	 * fail to convert memcg_params_to_cache() while traversing the list.
3198 	 */
3199 	VM_BUG_ON(root->memcg_params->memcg_caches[id]);
3200 	root->memcg_params->memcg_caches[id] = s;
3201 
3202 	mutex_lock(&memcg->slab_caches_mutex);
3203 	list_add(&s->memcg_params->list, &memcg->memcg_slab_caches);
3204 	mutex_unlock(&memcg->slab_caches_mutex);
3205 }
3206 
3207 void memcg_unregister_cache(struct kmem_cache *s)
3208 {
3209 	struct kmem_cache *root;
3210 	struct mem_cgroup *memcg;
3211 	int id;
3212 
3213 	if (is_root_cache(s))
3214 		return;
3215 
3216 	/*
3217 	 * Holding the slab_mutex assures nobody will touch the memcg_caches
3218 	 * array while we are modifying it.
3219 	 */
3220 	lockdep_assert_held(&slab_mutex);
3221 
3222 	root = s->memcg_params->root_cache;
3223 	memcg = s->memcg_params->memcg;
3224 	id = memcg_cache_id(memcg);
3225 
3226 	mutex_lock(&memcg->slab_caches_mutex);
3227 	list_del(&s->memcg_params->list);
3228 	mutex_unlock(&memcg->slab_caches_mutex);
3229 
3230 	/*
3231 	 * Clear the pointer to this cache in its parent's memcg_params only
3232 	 * after removing it from the memcg_slab_caches list, otherwise we can
3233 	 * fail to convert memcg_params_to_cache() while traversing the list.
3234 	 */
3235 	VM_BUG_ON(root->memcg_params->memcg_caches[id] != s);
3236 	root->memcg_params->memcg_caches[id] = NULL;
3237 }
3238 
3239 /*
3240  * During the creation a new cache, we need to disable our accounting mechanism
3241  * altogether. This is true even if we are not creating, but rather just
3242  * enqueing new caches to be created.
3243  *
3244  * This is because that process will trigger allocations; some visible, like
3245  * explicit kmallocs to auxiliary data structures, name strings and internal
3246  * cache structures; some well concealed, like INIT_WORK() that can allocate
3247  * objects during debug.
3248  *
3249  * If any allocation happens during memcg_kmem_get_cache, we will recurse back
3250  * to it. This may not be a bounded recursion: since the first cache creation
3251  * failed to complete (waiting on the allocation), we'll just try to create the
3252  * cache again, failing at the same point.
3253  *
3254  * memcg_kmem_get_cache is prepared to abort after seeing a positive count of
3255  * memcg_kmem_skip_account. So we enclose anything that might allocate memory
3256  * inside the following two functions.
3257  */
3258 static inline void memcg_stop_kmem_account(void)
3259 {
3260 	VM_BUG_ON(!current->mm);
3261 	current->memcg_kmem_skip_account++;
3262 }
3263 
3264 static inline void memcg_resume_kmem_account(void)
3265 {
3266 	VM_BUG_ON(!current->mm);
3267 	current->memcg_kmem_skip_account--;
3268 }
3269 
3270 static void kmem_cache_destroy_work_func(struct work_struct *w)
3271 {
3272 	struct kmem_cache *cachep;
3273 	struct memcg_cache_params *p;
3274 
3275 	p = container_of(w, struct memcg_cache_params, destroy);
3276 
3277 	cachep = memcg_params_to_cache(p);
3278 
3279 	/*
3280 	 * If we get down to 0 after shrink, we could delete right away.
3281 	 * However, memcg_release_pages() already puts us back in the workqueue
3282 	 * in that case. If we proceed deleting, we'll get a dangling
3283 	 * reference, and removing the object from the workqueue in that case
3284 	 * is unnecessary complication. We are not a fast path.
3285 	 *
3286 	 * Note that this case is fundamentally different from racing with
3287 	 * shrink_slab(): if memcg_cgroup_destroy_cache() is called in
3288 	 * kmem_cache_shrink, not only we would be reinserting a dead cache
3289 	 * into the queue, but doing so from inside the worker racing to
3290 	 * destroy it.
3291 	 *
3292 	 * So if we aren't down to zero, we'll just schedule a worker and try
3293 	 * again
3294 	 */
3295 	if (atomic_read(&cachep->memcg_params->nr_pages) != 0)
3296 		kmem_cache_shrink(cachep);
3297 	else
3298 		kmem_cache_destroy(cachep);
3299 }
3300 
3301 void mem_cgroup_destroy_cache(struct kmem_cache *cachep)
3302 {
3303 	if (!cachep->memcg_params->dead)
3304 		return;
3305 
3306 	/*
3307 	 * There are many ways in which we can get here.
3308 	 *
3309 	 * We can get to a memory-pressure situation while the delayed work is
3310 	 * still pending to run. The vmscan shrinkers can then release all
3311 	 * cache memory and get us to destruction. If this is the case, we'll
3312 	 * be executed twice, which is a bug (the second time will execute over
3313 	 * bogus data). In this case, cancelling the work should be fine.
3314 	 *
3315 	 * But we can also get here from the worker itself, if
3316 	 * kmem_cache_shrink is enough to shake all the remaining objects and
3317 	 * get the page count to 0. In this case, we'll deadlock if we try to
3318 	 * cancel the work (the worker runs with an internal lock held, which
3319 	 * is the same lock we would hold for cancel_work_sync().)
3320 	 *
3321 	 * Since we can't possibly know who got us here, just refrain from
3322 	 * running if there is already work pending
3323 	 */
3324 	if (work_pending(&cachep->memcg_params->destroy))
3325 		return;
3326 	/*
3327 	 * We have to defer the actual destroying to a workqueue, because
3328 	 * we might currently be in a context that cannot sleep.
3329 	 */
3330 	schedule_work(&cachep->memcg_params->destroy);
3331 }
3332 
3333 int __kmem_cache_destroy_memcg_children(struct kmem_cache *s)
3334 {
3335 	struct kmem_cache *c;
3336 	int i, failed = 0;
3337 
3338 	/*
3339 	 * If the cache is being destroyed, we trust that there is no one else
3340 	 * requesting objects from it. Even if there are, the sanity checks in
3341 	 * kmem_cache_destroy should caught this ill-case.
3342 	 *
3343 	 * Still, we don't want anyone else freeing memcg_caches under our
3344 	 * noses, which can happen if a new memcg comes to life. As usual,
3345 	 * we'll take the activate_kmem_mutex to protect ourselves against
3346 	 * this.
3347 	 */
3348 	mutex_lock(&activate_kmem_mutex);
3349 	for_each_memcg_cache_index(i) {
3350 		c = cache_from_memcg_idx(s, i);
3351 		if (!c)
3352 			continue;
3353 
3354 		/*
3355 		 * We will now manually delete the caches, so to avoid races
3356 		 * we need to cancel all pending destruction workers and
3357 		 * proceed with destruction ourselves.
3358 		 *
3359 		 * kmem_cache_destroy() will call kmem_cache_shrink internally,
3360 		 * and that could spawn the workers again: it is likely that
3361 		 * the cache still have active pages until this very moment.
3362 		 * This would lead us back to mem_cgroup_destroy_cache.
3363 		 *
3364 		 * But that will not execute at all if the "dead" flag is not
3365 		 * set, so flip it down to guarantee we are in control.
3366 		 */
3367 		c->memcg_params->dead = false;
3368 		cancel_work_sync(&c->memcg_params->destroy);
3369 		kmem_cache_destroy(c);
3370 
3371 		if (cache_from_memcg_idx(s, i))
3372 			failed++;
3373 	}
3374 	mutex_unlock(&activate_kmem_mutex);
3375 	return failed;
3376 }
3377 
3378 static void mem_cgroup_destroy_all_caches(struct mem_cgroup *memcg)
3379 {
3380 	struct kmem_cache *cachep;
3381 	struct memcg_cache_params *params;
3382 
3383 	if (!memcg_kmem_is_active(memcg))
3384 		return;
3385 
3386 	mutex_lock(&memcg->slab_caches_mutex);
3387 	list_for_each_entry(params, &memcg->memcg_slab_caches, list) {
3388 		cachep = memcg_params_to_cache(params);
3389 		cachep->memcg_params->dead = true;
3390 		schedule_work(&cachep->memcg_params->destroy);
3391 	}
3392 	mutex_unlock(&memcg->slab_caches_mutex);
3393 }
3394 
3395 struct create_work {
3396 	struct mem_cgroup *memcg;
3397 	struct kmem_cache *cachep;
3398 	struct work_struct work;
3399 };
3400 
3401 static void memcg_create_cache_work_func(struct work_struct *w)
3402 {
3403 	struct create_work *cw = container_of(w, struct create_work, work);
3404 	struct mem_cgroup *memcg = cw->memcg;
3405 	struct kmem_cache *cachep = cw->cachep;
3406 
3407 	kmem_cache_create_memcg(memcg, cachep);
3408 	css_put(&memcg->css);
3409 	kfree(cw);
3410 }
3411 
3412 /*
3413  * Enqueue the creation of a per-memcg kmem_cache.
3414  */
3415 static void __memcg_create_cache_enqueue(struct mem_cgroup *memcg,
3416 					 struct kmem_cache *cachep)
3417 {
3418 	struct create_work *cw;
3419 
3420 	cw = kmalloc(sizeof(struct create_work), GFP_NOWAIT);
3421 	if (cw == NULL) {
3422 		css_put(&memcg->css);
3423 		return;
3424 	}
3425 
3426 	cw->memcg = memcg;
3427 	cw->cachep = cachep;
3428 
3429 	INIT_WORK(&cw->work, memcg_create_cache_work_func);
3430 	schedule_work(&cw->work);
3431 }
3432 
3433 static void memcg_create_cache_enqueue(struct mem_cgroup *memcg,
3434 				       struct kmem_cache *cachep)
3435 {
3436 	/*
3437 	 * We need to stop accounting when we kmalloc, because if the
3438 	 * corresponding kmalloc cache is not yet created, the first allocation
3439 	 * in __memcg_create_cache_enqueue will recurse.
3440 	 *
3441 	 * However, it is better to enclose the whole function. Depending on
3442 	 * the debugging options enabled, INIT_WORK(), for instance, can
3443 	 * trigger an allocation. This too, will make us recurse. Because at
3444 	 * this point we can't allow ourselves back into memcg_kmem_get_cache,
3445 	 * the safest choice is to do it like this, wrapping the whole function.
3446 	 */
3447 	memcg_stop_kmem_account();
3448 	__memcg_create_cache_enqueue(memcg, cachep);
3449 	memcg_resume_kmem_account();
3450 }
3451 /*
3452  * Return the kmem_cache we're supposed to use for a slab allocation.
3453  * We try to use the current memcg's version of the cache.
3454  *
3455  * If the cache does not exist yet, if we are the first user of it,
3456  * we either create it immediately, if possible, or create it asynchronously
3457  * in a workqueue.
3458  * In the latter case, we will let the current allocation go through with
3459  * the original cache.
3460  *
3461  * Can't be called in interrupt context or from kernel threads.
3462  * This function needs to be called with rcu_read_lock() held.
3463  */
3464 struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep,
3465 					  gfp_t gfp)
3466 {
3467 	struct mem_cgroup *memcg;
3468 	struct kmem_cache *memcg_cachep;
3469 
3470 	VM_BUG_ON(!cachep->memcg_params);
3471 	VM_BUG_ON(!cachep->memcg_params->is_root_cache);
3472 
3473 	if (!current->mm || current->memcg_kmem_skip_account)
3474 		return cachep;
3475 
3476 	rcu_read_lock();
3477 	memcg = mem_cgroup_from_task(rcu_dereference(current->mm->owner));
3478 
3479 	if (!memcg_can_account_kmem(memcg))
3480 		goto out;
3481 
3482 	memcg_cachep = cache_from_memcg_idx(cachep, memcg_cache_id(memcg));
3483 	if (likely(memcg_cachep)) {
3484 		cachep = memcg_cachep;
3485 		goto out;
3486 	}
3487 
3488 	/* The corresponding put will be done in the workqueue. */
3489 	if (!css_tryget(&memcg->css))
3490 		goto out;
3491 	rcu_read_unlock();
3492 
3493 	/*
3494 	 * If we are in a safe context (can wait, and not in interrupt
3495 	 * context), we could be be predictable and return right away.
3496 	 * This would guarantee that the allocation being performed
3497 	 * already belongs in the new cache.
3498 	 *
3499 	 * However, there are some clashes that can arrive from locking.
3500 	 * For instance, because we acquire the slab_mutex while doing
3501 	 * kmem_cache_dup, this means no further allocation could happen
3502 	 * with the slab_mutex held.
3503 	 *
3504 	 * Also, because cache creation issue get_online_cpus(), this
3505 	 * creates a lock chain: memcg_slab_mutex -> cpu_hotplug_mutex,
3506 	 * that ends up reversed during cpu hotplug. (cpuset allocates
3507 	 * a bunch of GFP_KERNEL memory during cpuup). Due to all that,
3508 	 * better to defer everything.
3509 	 */
3510 	memcg_create_cache_enqueue(memcg, cachep);
3511 	return cachep;
3512 out:
3513 	rcu_read_unlock();
3514 	return cachep;
3515 }
3516 EXPORT_SYMBOL(__memcg_kmem_get_cache);
3517 
3518 /*
3519  * We need to verify if the allocation against current->mm->owner's memcg is
3520  * possible for the given order. But the page is not allocated yet, so we'll
3521  * need a further commit step to do the final arrangements.
3522  *
3523  * It is possible for the task to switch cgroups in this mean time, so at
3524  * commit time, we can't rely on task conversion any longer.  We'll then use
3525  * the handle argument to return to the caller which cgroup we should commit
3526  * against. We could also return the memcg directly and avoid the pointer
3527  * passing, but a boolean return value gives better semantics considering
3528  * the compiled-out case as well.
3529  *
3530  * Returning true means the allocation is possible.
3531  */
3532 bool
3533 __memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **_memcg, int order)
3534 {
3535 	struct mem_cgroup *memcg;
3536 	int ret;
3537 
3538 	*_memcg = NULL;
3539 
3540 	/*
3541 	 * Disabling accounting is only relevant for some specific memcg
3542 	 * internal allocations. Therefore we would initially not have such
3543 	 * check here, since direct calls to the page allocator that are marked
3544 	 * with GFP_KMEMCG only happen outside memcg core. We are mostly
3545 	 * concerned with cache allocations, and by having this test at
3546 	 * memcg_kmem_get_cache, we are already able to relay the allocation to
3547 	 * the root cache and bypass the memcg cache altogether.
3548 	 *
3549 	 * There is one exception, though: the SLUB allocator does not create
3550 	 * large order caches, but rather service large kmallocs directly from
3551 	 * the page allocator. Therefore, the following sequence when backed by
3552 	 * the SLUB allocator:
3553 	 *
3554 	 *	memcg_stop_kmem_account();
3555 	 *	kmalloc(<large_number>)
3556 	 *	memcg_resume_kmem_account();
3557 	 *
3558 	 * would effectively ignore the fact that we should skip accounting,
3559 	 * since it will drive us directly to this function without passing
3560 	 * through the cache selector memcg_kmem_get_cache. Such large
3561 	 * allocations are extremely rare but can happen, for instance, for the
3562 	 * cache arrays. We bring this test here.
3563 	 */
3564 	if (!current->mm || current->memcg_kmem_skip_account)
3565 		return true;
3566 
3567 	memcg = get_mem_cgroup_from_mm(current->mm);
3568 
3569 	if (!memcg_can_account_kmem(memcg)) {
3570 		css_put(&memcg->css);
3571 		return true;
3572 	}
3573 
3574 	ret = memcg_charge_kmem(memcg, gfp, PAGE_SIZE << order);
3575 	if (!ret)
3576 		*_memcg = memcg;
3577 
3578 	css_put(&memcg->css);
3579 	return (ret == 0);
3580 }
3581 
3582 void __memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg,
3583 			      int order)
3584 {
3585 	struct page_cgroup *pc;
3586 
3587 	VM_BUG_ON(mem_cgroup_is_root(memcg));
3588 
3589 	/* The page allocation failed. Revert */
3590 	if (!page) {
3591 		memcg_uncharge_kmem(memcg, PAGE_SIZE << order);
3592 		return;
3593 	}
3594 
3595 	pc = lookup_page_cgroup(page);
3596 	lock_page_cgroup(pc);
3597 	pc->mem_cgroup = memcg;
3598 	SetPageCgroupUsed(pc);
3599 	unlock_page_cgroup(pc);
3600 }
3601 
3602 void __memcg_kmem_uncharge_pages(struct page *page, int order)
3603 {
3604 	struct mem_cgroup *memcg = NULL;
3605 	struct page_cgroup *pc;
3606 
3607 
3608 	pc = lookup_page_cgroup(page);
3609 	/*
3610 	 * Fast unlocked return. Theoretically might have changed, have to
3611 	 * check again after locking.
3612 	 */
3613 	if (!PageCgroupUsed(pc))
3614 		return;
3615 
3616 	lock_page_cgroup(pc);
3617 	if (PageCgroupUsed(pc)) {
3618 		memcg = pc->mem_cgroup;
3619 		ClearPageCgroupUsed(pc);
3620 	}
3621 	unlock_page_cgroup(pc);
3622 
3623 	/*
3624 	 * We trust that only if there is a memcg associated with the page, it
3625 	 * is a valid allocation
3626 	 */
3627 	if (!memcg)
3628 		return;
3629 
3630 	VM_BUG_ON_PAGE(mem_cgroup_is_root(memcg), page);
3631 	memcg_uncharge_kmem(memcg, PAGE_SIZE << order);
3632 }
3633 #else
3634 static inline void mem_cgroup_destroy_all_caches(struct mem_cgroup *memcg)
3635 {
3636 }
3637 #endif /* CONFIG_MEMCG_KMEM */
3638 
3639 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
3640 
3641 #define PCGF_NOCOPY_AT_SPLIT (1 << PCG_LOCK | 1 << PCG_MIGRATION)
3642 /*
3643  * Because tail pages are not marked as "used", set it. We're under
3644  * zone->lru_lock, 'splitting on pmd' and compound_lock.
3645  * charge/uncharge will be never happen and move_account() is done under
3646  * compound_lock(), so we don't have to take care of races.
3647  */
3648 void mem_cgroup_split_huge_fixup(struct page *head)
3649 {
3650 	struct page_cgroup *head_pc = lookup_page_cgroup(head);
3651 	struct page_cgroup *pc;
3652 	struct mem_cgroup *memcg;
3653 	int i;
3654 
3655 	if (mem_cgroup_disabled())
3656 		return;
3657 
3658 	memcg = head_pc->mem_cgroup;
3659 	for (i = 1; i < HPAGE_PMD_NR; i++) {
3660 		pc = head_pc + i;
3661 		pc->mem_cgroup = memcg;
3662 		smp_wmb();/* see __commit_charge() */
3663 		pc->flags = head_pc->flags & ~PCGF_NOCOPY_AT_SPLIT;
3664 	}
3665 	__this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE],
3666 		       HPAGE_PMD_NR);
3667 }
3668 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
3669 
3670 /**
3671  * mem_cgroup_move_account - move account of the page
3672  * @page: the page
3673  * @nr_pages: number of regular pages (>1 for huge pages)
3674  * @pc:	page_cgroup of the page.
3675  * @from: mem_cgroup which the page is moved from.
3676  * @to:	mem_cgroup which the page is moved to. @from != @to.
3677  *
3678  * The caller must confirm following.
3679  * - page is not on LRU (isolate_page() is useful.)
3680  * - compound_lock is held when nr_pages > 1
3681  *
3682  * This function doesn't do "charge" to new cgroup and doesn't do "uncharge"
3683  * from old cgroup.
3684  */
3685 static int mem_cgroup_move_account(struct page *page,
3686 				   unsigned int nr_pages,
3687 				   struct page_cgroup *pc,
3688 				   struct mem_cgroup *from,
3689 				   struct mem_cgroup *to)
3690 {
3691 	unsigned long flags;
3692 	int ret;
3693 	bool anon = PageAnon(page);
3694 
3695 	VM_BUG_ON(from == to);
3696 	VM_BUG_ON_PAGE(PageLRU(page), page);
3697 	/*
3698 	 * The page is isolated from LRU. So, collapse function
3699 	 * will not handle this page. But page splitting can happen.
3700 	 * Do this check under compound_page_lock(). The caller should
3701 	 * hold it.
3702 	 */
3703 	ret = -EBUSY;
3704 	if (nr_pages > 1 && !PageTransHuge(page))
3705 		goto out;
3706 
3707 	lock_page_cgroup(pc);
3708 
3709 	ret = -EINVAL;
3710 	if (!PageCgroupUsed(pc) || pc->mem_cgroup != from)
3711 		goto unlock;
3712 
3713 	move_lock_mem_cgroup(from, &flags);
3714 
3715 	if (!anon && page_mapped(page)) {
3716 		__this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_FILE_MAPPED],
3717 			       nr_pages);
3718 		__this_cpu_add(to->stat->count[MEM_CGROUP_STAT_FILE_MAPPED],
3719 			       nr_pages);
3720 	}
3721 
3722 	if (PageWriteback(page)) {
3723 		__this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_WRITEBACK],
3724 			       nr_pages);
3725 		__this_cpu_add(to->stat->count[MEM_CGROUP_STAT_WRITEBACK],
3726 			       nr_pages);
3727 	}
3728 
3729 	mem_cgroup_charge_statistics(from, page, anon, -nr_pages);
3730 
3731 	/* caller should have done css_get */
3732 	pc->mem_cgroup = to;
3733 	mem_cgroup_charge_statistics(to, page, anon, nr_pages);
3734 	move_unlock_mem_cgroup(from, &flags);
3735 	ret = 0;
3736 unlock:
3737 	unlock_page_cgroup(pc);
3738 	/*
3739 	 * check events
3740 	 */
3741 	memcg_check_events(to, page);
3742 	memcg_check_events(from, page);
3743 out:
3744 	return ret;
3745 }
3746 
3747 /**
3748  * mem_cgroup_move_parent - moves page to the parent group
3749  * @page: the page to move
3750  * @pc: page_cgroup of the page
3751  * @child: page's cgroup
3752  *
3753  * move charges to its parent or the root cgroup if the group has no
3754  * parent (aka use_hierarchy==0).
3755  * Although this might fail (get_page_unless_zero, isolate_lru_page or
3756  * mem_cgroup_move_account fails) the failure is always temporary and
3757  * it signals a race with a page removal/uncharge or migration. In the
3758  * first case the page is on the way out and it will vanish from the LRU
3759  * on the next attempt and the call should be retried later.
3760  * Isolation from the LRU fails only if page has been isolated from
3761  * the LRU since we looked at it and that usually means either global
3762  * reclaim or migration going on. The page will either get back to the
3763  * LRU or vanish.
3764  * Finaly mem_cgroup_move_account fails only if the page got uncharged
3765  * (!PageCgroupUsed) or moved to a different group. The page will
3766  * disappear in the next attempt.
3767  */
3768 static int mem_cgroup_move_parent(struct page *page,
3769 				  struct page_cgroup *pc,
3770 				  struct mem_cgroup *child)
3771 {
3772 	struct mem_cgroup *parent;
3773 	unsigned int nr_pages;
3774 	unsigned long uninitialized_var(flags);
3775 	int ret;
3776 
3777 	VM_BUG_ON(mem_cgroup_is_root(child));
3778 
3779 	ret = -EBUSY;
3780 	if (!get_page_unless_zero(page))
3781 		goto out;
3782 	if (isolate_lru_page(page))
3783 		goto put;
3784 
3785 	nr_pages = hpage_nr_pages(page);
3786 
3787 	parent = parent_mem_cgroup(child);
3788 	/*
3789 	 * If no parent, move charges to root cgroup.
3790 	 */
3791 	if (!parent)
3792 		parent = root_mem_cgroup;
3793 
3794 	if (nr_pages > 1) {
3795 		VM_BUG_ON_PAGE(!PageTransHuge(page), page);
3796 		flags = compound_lock_irqsave(page);
3797 	}
3798 
3799 	ret = mem_cgroup_move_account(page, nr_pages,
3800 				pc, child, parent);
3801 	if (!ret)
3802 		__mem_cgroup_cancel_local_charge(child, nr_pages);
3803 
3804 	if (nr_pages > 1)
3805 		compound_unlock_irqrestore(page, flags);
3806 	putback_lru_page(page);
3807 put:
3808 	put_page(page);
3809 out:
3810 	return ret;
3811 }
3812 
3813 int mem_cgroup_charge_anon(struct page *page,
3814 			      struct mm_struct *mm, gfp_t gfp_mask)
3815 {
3816 	unsigned int nr_pages = 1;
3817 	struct mem_cgroup *memcg;
3818 	bool oom = true;
3819 
3820 	if (mem_cgroup_disabled())
3821 		return 0;
3822 
3823 	VM_BUG_ON_PAGE(page_mapped(page), page);
3824 	VM_BUG_ON_PAGE(page->mapping && !PageAnon(page), page);
3825 	VM_BUG_ON(!mm);
3826 
3827 	if (PageTransHuge(page)) {
3828 		nr_pages <<= compound_order(page);
3829 		VM_BUG_ON_PAGE(!PageTransHuge(page), page);
3830 		/*
3831 		 * Never OOM-kill a process for a huge page.  The
3832 		 * fault handler will fall back to regular pages.
3833 		 */
3834 		oom = false;
3835 	}
3836 
3837 	memcg = mem_cgroup_try_charge_mm(mm, gfp_mask, nr_pages, oom);
3838 	if (!memcg)
3839 		return -ENOMEM;
3840 	__mem_cgroup_commit_charge(memcg, page, nr_pages,
3841 				   MEM_CGROUP_CHARGE_TYPE_ANON, false);
3842 	return 0;
3843 }
3844 
3845 /*
3846  * While swap-in, try_charge -> commit or cancel, the page is locked.
3847  * And when try_charge() successfully returns, one refcnt to memcg without
3848  * struct page_cgroup is acquired. This refcnt will be consumed by
3849  * "commit()" or removed by "cancel()"
3850  */
3851 static int __mem_cgroup_try_charge_swapin(struct mm_struct *mm,
3852 					  struct page *page,
3853 					  gfp_t mask,
3854 					  struct mem_cgroup **memcgp)
3855 {
3856 	struct mem_cgroup *memcg = NULL;
3857 	struct page_cgroup *pc;
3858 	int ret;
3859 
3860 	pc = lookup_page_cgroup(page);
3861 	/*
3862 	 * Every swap fault against a single page tries to charge the
3863 	 * page, bail as early as possible.  shmem_unuse() encounters
3864 	 * already charged pages, too.  The USED bit is protected by
3865 	 * the page lock, which serializes swap cache removal, which
3866 	 * in turn serializes uncharging.
3867 	 */
3868 	if (PageCgroupUsed(pc))
3869 		goto out;
3870 	if (do_swap_account)
3871 		memcg = try_get_mem_cgroup_from_page(page);
3872 	if (!memcg)
3873 		memcg = get_mem_cgroup_from_mm(mm);
3874 	ret = mem_cgroup_try_charge(memcg, mask, 1, true);
3875 	css_put(&memcg->css);
3876 	if (ret == -EINTR)
3877 		memcg = root_mem_cgroup;
3878 	else if (ret)
3879 		return ret;
3880 out:
3881 	*memcgp = memcg;
3882 	return 0;
3883 }
3884 
3885 int mem_cgroup_try_charge_swapin(struct mm_struct *mm, struct page *page,
3886 				 gfp_t gfp_mask, struct mem_cgroup **memcgp)
3887 {
3888 	if (mem_cgroup_disabled()) {
3889 		*memcgp = NULL;
3890 		return 0;
3891 	}
3892 	/*
3893 	 * A racing thread's fault, or swapoff, may have already
3894 	 * updated the pte, and even removed page from swap cache: in
3895 	 * those cases unuse_pte()'s pte_same() test will fail; but
3896 	 * there's also a KSM case which does need to charge the page.
3897 	 */
3898 	if (!PageSwapCache(page)) {
3899 		struct mem_cgroup *memcg;
3900 
3901 		memcg = mem_cgroup_try_charge_mm(mm, gfp_mask, 1, true);
3902 		if (!memcg)
3903 			return -ENOMEM;
3904 		*memcgp = memcg;
3905 		return 0;
3906 	}
3907 	return __mem_cgroup_try_charge_swapin(mm, page, gfp_mask, memcgp);
3908 }
3909 
3910 void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *memcg)
3911 {
3912 	if (mem_cgroup_disabled())
3913 		return;
3914 	if (!memcg)
3915 		return;
3916 	__mem_cgroup_cancel_charge(memcg, 1);
3917 }
3918 
3919 static void
3920 __mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *memcg,
3921 					enum charge_type ctype)
3922 {
3923 	if (mem_cgroup_disabled())
3924 		return;
3925 	if (!memcg)
3926 		return;
3927 
3928 	__mem_cgroup_commit_charge(memcg, page, 1, ctype, true);
3929 	/*
3930 	 * Now swap is on-memory. This means this page may be
3931 	 * counted both as mem and swap....double count.
3932 	 * Fix it by uncharging from memsw. Basically, this SwapCache is stable
3933 	 * under lock_page(). But in do_swap_page()::memory.c, reuse_swap_page()
3934 	 * may call delete_from_swap_cache() before reach here.
3935 	 */
3936 	if (do_swap_account && PageSwapCache(page)) {
3937 		swp_entry_t ent = {.val = page_private(page)};
3938 		mem_cgroup_uncharge_swap(ent);
3939 	}
3940 }
3941 
3942 void mem_cgroup_commit_charge_swapin(struct page *page,
3943 				     struct mem_cgroup *memcg)
3944 {
3945 	__mem_cgroup_commit_charge_swapin(page, memcg,
3946 					  MEM_CGROUP_CHARGE_TYPE_ANON);
3947 }
3948 
3949 int mem_cgroup_charge_file(struct page *page, struct mm_struct *mm,
3950 				gfp_t gfp_mask)
3951 {
3952 	enum charge_type type = MEM_CGROUP_CHARGE_TYPE_CACHE;
3953 	struct mem_cgroup *memcg;
3954 	int ret;
3955 
3956 	if (mem_cgroup_disabled())
3957 		return 0;
3958 	if (PageCompound(page))
3959 		return 0;
3960 
3961 	if (PageSwapCache(page)) { /* shmem */
3962 		ret = __mem_cgroup_try_charge_swapin(mm, page,
3963 						     gfp_mask, &memcg);
3964 		if (ret)
3965 			return ret;
3966 		__mem_cgroup_commit_charge_swapin(page, memcg, type);
3967 		return 0;
3968 	}
3969 
3970 	memcg = mem_cgroup_try_charge_mm(mm, gfp_mask, 1, true);
3971 	if (!memcg)
3972 		return -ENOMEM;
3973 	__mem_cgroup_commit_charge(memcg, page, 1, type, false);
3974 	return 0;
3975 }
3976 
3977 static void mem_cgroup_do_uncharge(struct mem_cgroup *memcg,
3978 				   unsigned int nr_pages,
3979 				   const enum charge_type ctype)
3980 {
3981 	struct memcg_batch_info *batch = NULL;
3982 	bool uncharge_memsw = true;
3983 
3984 	/* If swapout, usage of swap doesn't decrease */
3985 	if (!do_swap_account || ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT)
3986 		uncharge_memsw = false;
3987 
3988 	batch = &current->memcg_batch;
3989 	/*
3990 	 * In usual, we do css_get() when we remember memcg pointer.
3991 	 * But in this case, we keep res->usage until end of a series of
3992 	 * uncharges. Then, it's ok to ignore memcg's refcnt.
3993 	 */
3994 	if (!batch->memcg)
3995 		batch->memcg = memcg;
3996 	/*
3997 	 * do_batch > 0 when unmapping pages or inode invalidate/truncate.
3998 	 * In those cases, all pages freed continuously can be expected to be in
3999 	 * the same cgroup and we have chance to coalesce uncharges.
4000 	 * But we do uncharge one by one if this is killed by OOM(TIF_MEMDIE)
4001 	 * because we want to do uncharge as soon as possible.
4002 	 */
4003 
4004 	if (!batch->do_batch || test_thread_flag(TIF_MEMDIE))
4005 		goto direct_uncharge;
4006 
4007 	if (nr_pages > 1)
4008 		goto direct_uncharge;
4009 
4010 	/*
4011 	 * In typical case, batch->memcg == mem. This means we can
4012 	 * merge a series of uncharges to an uncharge of res_counter.
4013 	 * If not, we uncharge res_counter ony by one.
4014 	 */
4015 	if (batch->memcg != memcg)
4016 		goto direct_uncharge;
4017 	/* remember freed charge and uncharge it later */
4018 	batch->nr_pages++;
4019 	if (uncharge_memsw)
4020 		batch->memsw_nr_pages++;
4021 	return;
4022 direct_uncharge:
4023 	res_counter_uncharge(&memcg->res, nr_pages * PAGE_SIZE);
4024 	if (uncharge_memsw)
4025 		res_counter_uncharge(&memcg->memsw, nr_pages * PAGE_SIZE);
4026 	if (unlikely(batch->memcg != memcg))
4027 		memcg_oom_recover(memcg);
4028 }
4029 
4030 /*
4031  * uncharge if !page_mapped(page)
4032  */
4033 static struct mem_cgroup *
4034 __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype,
4035 			     bool end_migration)
4036 {
4037 	struct mem_cgroup *memcg = NULL;
4038 	unsigned int nr_pages = 1;
4039 	struct page_cgroup *pc;
4040 	bool anon;
4041 
4042 	if (mem_cgroup_disabled())
4043 		return NULL;
4044 
4045 	if (PageTransHuge(page)) {
4046 		nr_pages <<= compound_order(page);
4047 		VM_BUG_ON_PAGE(!PageTransHuge(page), page);
4048 	}
4049 	/*
4050 	 * Check if our page_cgroup is valid
4051 	 */
4052 	pc = lookup_page_cgroup(page);
4053 	if (unlikely(!PageCgroupUsed(pc)))
4054 		return NULL;
4055 
4056 	lock_page_cgroup(pc);
4057 
4058 	memcg = pc->mem_cgroup;
4059 
4060 	if (!PageCgroupUsed(pc))
4061 		goto unlock_out;
4062 
4063 	anon = PageAnon(page);
4064 
4065 	switch (ctype) {
4066 	case MEM_CGROUP_CHARGE_TYPE_ANON:
4067 		/*
4068 		 * Generally PageAnon tells if it's the anon statistics to be
4069 		 * updated; but sometimes e.g. mem_cgroup_uncharge_page() is
4070 		 * used before page reached the stage of being marked PageAnon.
4071 		 */
4072 		anon = true;
4073 		/* fallthrough */
4074 	case MEM_CGROUP_CHARGE_TYPE_DROP:
4075 		/* See mem_cgroup_prepare_migration() */
4076 		if (page_mapped(page))
4077 			goto unlock_out;
4078 		/*
4079 		 * Pages under migration may not be uncharged.  But
4080 		 * end_migration() /must/ be the one uncharging the
4081 		 * unused post-migration page and so it has to call
4082 		 * here with the migration bit still set.  See the
4083 		 * res_counter handling below.
4084 		 */
4085 		if (!end_migration && PageCgroupMigration(pc))
4086 			goto unlock_out;
4087 		break;
4088 	case MEM_CGROUP_CHARGE_TYPE_SWAPOUT:
4089 		if (!PageAnon(page)) {	/* Shared memory */
4090 			if (page->mapping && !page_is_file_cache(page))
4091 				goto unlock_out;
4092 		} else if (page_mapped(page)) /* Anon */
4093 				goto unlock_out;
4094 		break;
4095 	default:
4096 		break;
4097 	}
4098 
4099 	mem_cgroup_charge_statistics(memcg, page, anon, -nr_pages);
4100 
4101 	ClearPageCgroupUsed(pc);
4102 	/*
4103 	 * pc->mem_cgroup is not cleared here. It will be accessed when it's
4104 	 * freed from LRU. This is safe because uncharged page is expected not
4105 	 * to be reused (freed soon). Exception is SwapCache, it's handled by
4106 	 * special functions.
4107 	 */
4108 
4109 	unlock_page_cgroup(pc);
4110 	/*
4111 	 * even after unlock, we have memcg->res.usage here and this memcg
4112 	 * will never be freed, so it's safe to call css_get().
4113 	 */
4114 	memcg_check_events(memcg, page);
4115 	if (do_swap_account && ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT) {
4116 		mem_cgroup_swap_statistics(memcg, true);
4117 		css_get(&memcg->css);
4118 	}
4119 	/*
4120 	 * Migration does not charge the res_counter for the
4121 	 * replacement page, so leave it alone when phasing out the
4122 	 * page that is unused after the migration.
4123 	 */
4124 	if (!end_migration && !mem_cgroup_is_root(memcg))
4125 		mem_cgroup_do_uncharge(memcg, nr_pages, ctype);
4126 
4127 	return memcg;
4128 
4129 unlock_out:
4130 	unlock_page_cgroup(pc);
4131 	return NULL;
4132 }
4133 
4134 void mem_cgroup_uncharge_page(struct page *page)
4135 {
4136 	/* early check. */
4137 	if (page_mapped(page))
4138 		return;
4139 	VM_BUG_ON_PAGE(page->mapping && !PageAnon(page), page);
4140 	/*
4141 	 * If the page is in swap cache, uncharge should be deferred
4142 	 * to the swap path, which also properly accounts swap usage
4143 	 * and handles memcg lifetime.
4144 	 *
4145 	 * Note that this check is not stable and reclaim may add the
4146 	 * page to swap cache at any time after this.  However, if the
4147 	 * page is not in swap cache by the time page->mapcount hits
4148 	 * 0, there won't be any page table references to the swap
4149 	 * slot, and reclaim will free it and not actually write the
4150 	 * page to disk.
4151 	 */
4152 	if (PageSwapCache(page))
4153 		return;
4154 	__mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_ANON, false);
4155 }
4156 
4157 void mem_cgroup_uncharge_cache_page(struct page *page)
4158 {
4159 	VM_BUG_ON_PAGE(page_mapped(page), page);
4160 	VM_BUG_ON_PAGE(page->mapping, page);
4161 	__mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_CACHE, false);
4162 }
4163 
4164 /*
4165  * Batch_start/batch_end is called in unmap_page_range/invlidate/trucate.
4166  * In that cases, pages are freed continuously and we can expect pages
4167  * are in the same memcg. All these calls itself limits the number of
4168  * pages freed at once, then uncharge_start/end() is called properly.
4169  * This may be called prural(2) times in a context,
4170  */
4171 
4172 void mem_cgroup_uncharge_start(void)
4173 {
4174 	current->memcg_batch.do_batch++;
4175 	/* We can do nest. */
4176 	if (current->memcg_batch.do_batch == 1) {
4177 		current->memcg_batch.memcg = NULL;
4178 		current->memcg_batch.nr_pages = 0;
4179 		current->memcg_batch.memsw_nr_pages = 0;
4180 	}
4181 }
4182 
4183 void mem_cgroup_uncharge_end(void)
4184 {
4185 	struct memcg_batch_info *batch = &current->memcg_batch;
4186 
4187 	if (!batch->do_batch)
4188 		return;
4189 
4190 	batch->do_batch--;
4191 	if (batch->do_batch) /* If stacked, do nothing. */
4192 		return;
4193 
4194 	if (!batch->memcg)
4195 		return;
4196 	/*
4197 	 * This "batch->memcg" is valid without any css_get/put etc...
4198 	 * bacause we hide charges behind us.
4199 	 */
4200 	if (batch->nr_pages)
4201 		res_counter_uncharge(&batch->memcg->res,
4202 				     batch->nr_pages * PAGE_SIZE);
4203 	if (batch->memsw_nr_pages)
4204 		res_counter_uncharge(&batch->memcg->memsw,
4205 				     batch->memsw_nr_pages * PAGE_SIZE);
4206 	memcg_oom_recover(batch->memcg);
4207 	/* forget this pointer (for sanity check) */
4208 	batch->memcg = NULL;
4209 }
4210 
4211 #ifdef CONFIG_SWAP
4212 /*
4213  * called after __delete_from_swap_cache() and drop "page" account.
4214  * memcg information is recorded to swap_cgroup of "ent"
4215  */
4216 void
4217 mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout)
4218 {
4219 	struct mem_cgroup *memcg;
4220 	int ctype = MEM_CGROUP_CHARGE_TYPE_SWAPOUT;
4221 
4222 	if (!swapout) /* this was a swap cache but the swap is unused ! */
4223 		ctype = MEM_CGROUP_CHARGE_TYPE_DROP;
4224 
4225 	memcg = __mem_cgroup_uncharge_common(page, ctype, false);
4226 
4227 	/*
4228 	 * record memcg information,  if swapout && memcg != NULL,
4229 	 * css_get() was called in uncharge().
4230 	 */
4231 	if (do_swap_account && swapout && memcg)
4232 		swap_cgroup_record(ent, mem_cgroup_id(memcg));
4233 }
4234 #endif
4235 
4236 #ifdef CONFIG_MEMCG_SWAP
4237 /*
4238  * called from swap_entry_free(). remove record in swap_cgroup and
4239  * uncharge "memsw" account.
4240  */
4241 void mem_cgroup_uncharge_swap(swp_entry_t ent)
4242 {
4243 	struct mem_cgroup *memcg;
4244 	unsigned short id;
4245 
4246 	if (!do_swap_account)
4247 		return;
4248 
4249 	id = swap_cgroup_record(ent, 0);
4250 	rcu_read_lock();
4251 	memcg = mem_cgroup_lookup(id);
4252 	if (memcg) {
4253 		/*
4254 		 * We uncharge this because swap is freed.
4255 		 * This memcg can be obsolete one. We avoid calling css_tryget
4256 		 */
4257 		if (!mem_cgroup_is_root(memcg))
4258 			res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
4259 		mem_cgroup_swap_statistics(memcg, false);
4260 		css_put(&memcg->css);
4261 	}
4262 	rcu_read_unlock();
4263 }
4264 
4265 /**
4266  * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record.
4267  * @entry: swap entry to be moved
4268  * @from:  mem_cgroup which the entry is moved from
4269  * @to:  mem_cgroup which the entry is moved to
4270  *
4271  * It succeeds only when the swap_cgroup's record for this entry is the same
4272  * as the mem_cgroup's id of @from.
4273  *
4274  * Returns 0 on success, -EINVAL on failure.
4275  *
4276  * The caller must have charged to @to, IOW, called res_counter_charge() about
4277  * both res and memsw, and called css_get().
4278  */
4279 static int mem_cgroup_move_swap_account(swp_entry_t entry,
4280 				struct mem_cgroup *from, struct mem_cgroup *to)
4281 {
4282 	unsigned short old_id, new_id;
4283 
4284 	old_id = mem_cgroup_id(from);
4285 	new_id = mem_cgroup_id(to);
4286 
4287 	if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
4288 		mem_cgroup_swap_statistics(from, false);
4289 		mem_cgroup_swap_statistics(to, true);
4290 		/*
4291 		 * This function is only called from task migration context now.
4292 		 * It postpones res_counter and refcount handling till the end
4293 		 * of task migration(mem_cgroup_clear_mc()) for performance
4294 		 * improvement. But we cannot postpone css_get(to)  because if
4295 		 * the process that has been moved to @to does swap-in, the
4296 		 * refcount of @to might be decreased to 0.
4297 		 *
4298 		 * We are in attach() phase, so the cgroup is guaranteed to be
4299 		 * alive, so we can just call css_get().
4300 		 */
4301 		css_get(&to->css);
4302 		return 0;
4303 	}
4304 	return -EINVAL;
4305 }
4306 #else
4307 static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
4308 				struct mem_cgroup *from, struct mem_cgroup *to)
4309 {
4310 	return -EINVAL;
4311 }
4312 #endif
4313 
4314 /*
4315  * Before starting migration, account PAGE_SIZE to mem_cgroup that the old
4316  * page belongs to.
4317  */
4318 void mem_cgroup_prepare_migration(struct page *page, struct page *newpage,
4319 				  struct mem_cgroup **memcgp)
4320 {
4321 	struct mem_cgroup *memcg = NULL;
4322 	unsigned int nr_pages = 1;
4323 	struct page_cgroup *pc;
4324 	enum charge_type ctype;
4325 
4326 	*memcgp = NULL;
4327 
4328 	if (mem_cgroup_disabled())
4329 		return;
4330 
4331 	if (PageTransHuge(page))
4332 		nr_pages <<= compound_order(page);
4333 
4334 	pc = lookup_page_cgroup(page);
4335 	lock_page_cgroup(pc);
4336 	if (PageCgroupUsed(pc)) {
4337 		memcg = pc->mem_cgroup;
4338 		css_get(&memcg->css);
4339 		/*
4340 		 * At migrating an anonymous page, its mapcount goes down
4341 		 * to 0 and uncharge() will be called. But, even if it's fully
4342 		 * unmapped, migration may fail and this page has to be
4343 		 * charged again. We set MIGRATION flag here and delay uncharge
4344 		 * until end_migration() is called
4345 		 *
4346 		 * Corner Case Thinking
4347 		 * A)
4348 		 * When the old page was mapped as Anon and it's unmap-and-freed
4349 		 * while migration was ongoing.
4350 		 * If unmap finds the old page, uncharge() of it will be delayed
4351 		 * until end_migration(). If unmap finds a new page, it's
4352 		 * uncharged when it make mapcount to be 1->0. If unmap code
4353 		 * finds swap_migration_entry, the new page will not be mapped
4354 		 * and end_migration() will find it(mapcount==0).
4355 		 *
4356 		 * B)
4357 		 * When the old page was mapped but migraion fails, the kernel
4358 		 * remaps it. A charge for it is kept by MIGRATION flag even
4359 		 * if mapcount goes down to 0. We can do remap successfully
4360 		 * without charging it again.
4361 		 *
4362 		 * C)
4363 		 * The "old" page is under lock_page() until the end of
4364 		 * migration, so, the old page itself will not be swapped-out.
4365 		 * If the new page is swapped out before end_migraton, our
4366 		 * hook to usual swap-out path will catch the event.
4367 		 */
4368 		if (PageAnon(page))
4369 			SetPageCgroupMigration(pc);
4370 	}
4371 	unlock_page_cgroup(pc);
4372 	/*
4373 	 * If the page is not charged at this point,
4374 	 * we return here.
4375 	 */
4376 	if (!memcg)
4377 		return;
4378 
4379 	*memcgp = memcg;
4380 	/*
4381 	 * We charge new page before it's used/mapped. So, even if unlock_page()
4382 	 * is called before end_migration, we can catch all events on this new
4383 	 * page. In the case new page is migrated but not remapped, new page's
4384 	 * mapcount will be finally 0 and we call uncharge in end_migration().
4385 	 */
4386 	if (PageAnon(page))
4387 		ctype = MEM_CGROUP_CHARGE_TYPE_ANON;
4388 	else
4389 		ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
4390 	/*
4391 	 * The page is committed to the memcg, but it's not actually
4392 	 * charged to the res_counter since we plan on replacing the
4393 	 * old one and only one page is going to be left afterwards.
4394 	 */
4395 	__mem_cgroup_commit_charge(memcg, newpage, nr_pages, ctype, false);
4396 }
4397 
4398 /* remove redundant charge if migration failed*/
4399 void mem_cgroup_end_migration(struct mem_cgroup *memcg,
4400 	struct page *oldpage, struct page *newpage, bool migration_ok)
4401 {
4402 	struct page *used, *unused;
4403 	struct page_cgroup *pc;
4404 	bool anon;
4405 
4406 	if (!memcg)
4407 		return;
4408 
4409 	if (!migration_ok) {
4410 		used = oldpage;
4411 		unused = newpage;
4412 	} else {
4413 		used = newpage;
4414 		unused = oldpage;
4415 	}
4416 	anon = PageAnon(used);
4417 	__mem_cgroup_uncharge_common(unused,
4418 				     anon ? MEM_CGROUP_CHARGE_TYPE_ANON
4419 				     : MEM_CGROUP_CHARGE_TYPE_CACHE,
4420 				     true);
4421 	css_put(&memcg->css);
4422 	/*
4423 	 * We disallowed uncharge of pages under migration because mapcount
4424 	 * of the page goes down to zero, temporarly.
4425 	 * Clear the flag and check the page should be charged.
4426 	 */
4427 	pc = lookup_page_cgroup(oldpage);
4428 	lock_page_cgroup(pc);
4429 	ClearPageCgroupMigration(pc);
4430 	unlock_page_cgroup(pc);
4431 
4432 	/*
4433 	 * If a page is a file cache, radix-tree replacement is very atomic
4434 	 * and we can skip this check. When it was an Anon page, its mapcount
4435 	 * goes down to 0. But because we added MIGRATION flage, it's not
4436 	 * uncharged yet. There are several case but page->mapcount check
4437 	 * and USED bit check in mem_cgroup_uncharge_page() will do enough
4438 	 * check. (see prepare_charge() also)
4439 	 */
4440 	if (anon)
4441 		mem_cgroup_uncharge_page(used);
4442 }
4443 
4444 /*
4445  * At replace page cache, newpage is not under any memcg but it's on
4446  * LRU. So, this function doesn't touch res_counter but handles LRU
4447  * in correct way. Both pages are locked so we cannot race with uncharge.
4448  */
4449 void mem_cgroup_replace_page_cache(struct page *oldpage,
4450 				  struct page *newpage)
4451 {
4452 	struct mem_cgroup *memcg = NULL;
4453 	struct page_cgroup *pc;
4454 	enum charge_type type = MEM_CGROUP_CHARGE_TYPE_CACHE;
4455 
4456 	if (mem_cgroup_disabled())
4457 		return;
4458 
4459 	pc = lookup_page_cgroup(oldpage);
4460 	/* fix accounting on old pages */
4461 	lock_page_cgroup(pc);
4462 	if (PageCgroupUsed(pc)) {
4463 		memcg = pc->mem_cgroup;
4464 		mem_cgroup_charge_statistics(memcg, oldpage, false, -1);
4465 		ClearPageCgroupUsed(pc);
4466 	}
4467 	unlock_page_cgroup(pc);
4468 
4469 	/*
4470 	 * When called from shmem_replace_page(), in some cases the
4471 	 * oldpage has already been charged, and in some cases not.
4472 	 */
4473 	if (!memcg)
4474 		return;
4475 	/*
4476 	 * Even if newpage->mapping was NULL before starting replacement,
4477 	 * the newpage may be on LRU(or pagevec for LRU) already. We lock
4478 	 * LRU while we overwrite pc->mem_cgroup.
4479 	 */
4480 	__mem_cgroup_commit_charge(memcg, newpage, 1, type, true);
4481 }
4482 
4483 #ifdef CONFIG_DEBUG_VM
4484 static struct page_cgroup *lookup_page_cgroup_used(struct page *page)
4485 {
4486 	struct page_cgroup *pc;
4487 
4488 	pc = lookup_page_cgroup(page);
4489 	/*
4490 	 * Can be NULL while feeding pages into the page allocator for
4491 	 * the first time, i.e. during boot or memory hotplug;
4492 	 * or when mem_cgroup_disabled().
4493 	 */
4494 	if (likely(pc) && PageCgroupUsed(pc))
4495 		return pc;
4496 	return NULL;
4497 }
4498 
4499 bool mem_cgroup_bad_page_check(struct page *page)
4500 {
4501 	if (mem_cgroup_disabled())
4502 		return false;
4503 
4504 	return lookup_page_cgroup_used(page) != NULL;
4505 }
4506 
4507 void mem_cgroup_print_bad_page(struct page *page)
4508 {
4509 	struct page_cgroup *pc;
4510 
4511 	pc = lookup_page_cgroup_used(page);
4512 	if (pc) {
4513 		pr_alert("pc:%p pc->flags:%lx pc->mem_cgroup:%p\n",
4514 			 pc, pc->flags, pc->mem_cgroup);
4515 	}
4516 }
4517 #endif
4518 
4519 static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
4520 				unsigned long long val)
4521 {
4522 	int retry_count;
4523 	u64 memswlimit, memlimit;
4524 	int ret = 0;
4525 	int children = mem_cgroup_count_children(memcg);
4526 	u64 curusage, oldusage;
4527 	int enlarge;
4528 
4529 	/*
4530 	 * For keeping hierarchical_reclaim simple, how long we should retry
4531 	 * is depends on callers. We set our retry-count to be function
4532 	 * of # of children which we should visit in this loop.
4533 	 */
4534 	retry_count = MEM_CGROUP_RECLAIM_RETRIES * children;
4535 
4536 	oldusage = res_counter_read_u64(&memcg->res, RES_USAGE);
4537 
4538 	enlarge = 0;
4539 	while (retry_count) {
4540 		if (signal_pending(current)) {
4541 			ret = -EINTR;
4542 			break;
4543 		}
4544 		/*
4545 		 * Rather than hide all in some function, I do this in
4546 		 * open coded manner. You see what this really does.
4547 		 * We have to guarantee memcg->res.limit <= memcg->memsw.limit.
4548 		 */
4549 		mutex_lock(&set_limit_mutex);
4550 		memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
4551 		if (memswlimit < val) {
4552 			ret = -EINVAL;
4553 			mutex_unlock(&set_limit_mutex);
4554 			break;
4555 		}
4556 
4557 		memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT);
4558 		if (memlimit < val)
4559 			enlarge = 1;
4560 
4561 		ret = res_counter_set_limit(&memcg->res, val);
4562 		if (!ret) {
4563 			if (memswlimit == val)
4564 				memcg->memsw_is_minimum = true;
4565 			else
4566 				memcg->memsw_is_minimum = false;
4567 		}
4568 		mutex_unlock(&set_limit_mutex);
4569 
4570 		if (!ret)
4571 			break;
4572 
4573 		mem_cgroup_reclaim(memcg, GFP_KERNEL,
4574 				   MEM_CGROUP_RECLAIM_SHRINK);
4575 		curusage = res_counter_read_u64(&memcg->res, RES_USAGE);
4576 		/* Usage is reduced ? */
4577 		if (curusage >= oldusage)
4578 			retry_count--;
4579 		else
4580 			oldusage = curusage;
4581 	}
4582 	if (!ret && enlarge)
4583 		memcg_oom_recover(memcg);
4584 
4585 	return ret;
4586 }
4587 
4588 static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
4589 					unsigned long long val)
4590 {
4591 	int retry_count;
4592 	u64 memlimit, memswlimit, oldusage, curusage;
4593 	int children = mem_cgroup_count_children(memcg);
4594 	int ret = -EBUSY;
4595 	int enlarge = 0;
4596 
4597 	/* see mem_cgroup_resize_res_limit */
4598 	retry_count = children * MEM_CGROUP_RECLAIM_RETRIES;
4599 	oldusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
4600 	while (retry_count) {
4601 		if (signal_pending(current)) {
4602 			ret = -EINTR;
4603 			break;
4604 		}
4605 		/*
4606 		 * Rather than hide all in some function, I do this in
4607 		 * open coded manner. You see what this really does.
4608 		 * We have to guarantee memcg->res.limit <= memcg->memsw.limit.
4609 		 */
4610 		mutex_lock(&set_limit_mutex);
4611 		memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT);
4612 		if (memlimit > val) {
4613 			ret = -EINVAL;
4614 			mutex_unlock(&set_limit_mutex);
4615 			break;
4616 		}
4617 		memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
4618 		if (memswlimit < val)
4619 			enlarge = 1;
4620 		ret = res_counter_set_limit(&memcg->memsw, val);
4621 		if (!ret) {
4622 			if (memlimit == val)
4623 				memcg->memsw_is_minimum = true;
4624 			else
4625 				memcg->memsw_is_minimum = false;
4626 		}
4627 		mutex_unlock(&set_limit_mutex);
4628 
4629 		if (!ret)
4630 			break;
4631 
4632 		mem_cgroup_reclaim(memcg, GFP_KERNEL,
4633 				   MEM_CGROUP_RECLAIM_NOSWAP |
4634 				   MEM_CGROUP_RECLAIM_SHRINK);
4635 		curusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
4636 		/* Usage is reduced ? */
4637 		if (curusage >= oldusage)
4638 			retry_count--;
4639 		else
4640 			oldusage = curusage;
4641 	}
4642 	if (!ret && enlarge)
4643 		memcg_oom_recover(memcg);
4644 	return ret;
4645 }
4646 
4647 unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
4648 					    gfp_t gfp_mask,
4649 					    unsigned long *total_scanned)
4650 {
4651 	unsigned long nr_reclaimed = 0;
4652 	struct mem_cgroup_per_zone *mz, *next_mz = NULL;
4653 	unsigned long reclaimed;
4654 	int loop = 0;
4655 	struct mem_cgroup_tree_per_zone *mctz;
4656 	unsigned long long excess;
4657 	unsigned long nr_scanned;
4658 
4659 	if (order > 0)
4660 		return 0;
4661 
4662 	mctz = soft_limit_tree_node_zone(zone_to_nid(zone), zone_idx(zone));
4663 	/*
4664 	 * This loop can run a while, specially if mem_cgroup's continuously
4665 	 * keep exceeding their soft limit and putting the system under
4666 	 * pressure
4667 	 */
4668 	do {
4669 		if (next_mz)
4670 			mz = next_mz;
4671 		else
4672 			mz = mem_cgroup_largest_soft_limit_node(mctz);
4673 		if (!mz)
4674 			break;
4675 
4676 		nr_scanned = 0;
4677 		reclaimed = mem_cgroup_soft_reclaim(mz->memcg, zone,
4678 						    gfp_mask, &nr_scanned);
4679 		nr_reclaimed += reclaimed;
4680 		*total_scanned += nr_scanned;
4681 		spin_lock(&mctz->lock);
4682 
4683 		/*
4684 		 * If we failed to reclaim anything from this memory cgroup
4685 		 * it is time to move on to the next cgroup
4686 		 */
4687 		next_mz = NULL;
4688 		if (!reclaimed) {
4689 			do {
4690 				/*
4691 				 * Loop until we find yet another one.
4692 				 *
4693 				 * By the time we get the soft_limit lock
4694 				 * again, someone might have aded the
4695 				 * group back on the RB tree. Iterate to
4696 				 * make sure we get a different mem.
4697 				 * mem_cgroup_largest_soft_limit_node returns
4698 				 * NULL if no other cgroup is present on
4699 				 * the tree
4700 				 */
4701 				next_mz =
4702 				__mem_cgroup_largest_soft_limit_node(mctz);
4703 				if (next_mz == mz)
4704 					css_put(&next_mz->memcg->css);
4705 				else /* next_mz == NULL or other memcg */
4706 					break;
4707 			} while (1);
4708 		}
4709 		__mem_cgroup_remove_exceeded(mz->memcg, mz, mctz);
4710 		excess = res_counter_soft_limit_excess(&mz->memcg->res);
4711 		/*
4712 		 * One school of thought says that we should not add
4713 		 * back the node to the tree if reclaim returns 0.
4714 		 * But our reclaim could return 0, simply because due
4715 		 * to priority we are exposing a smaller subset of
4716 		 * memory to reclaim from. Consider this as a longer
4717 		 * term TODO.
4718 		 */
4719 		/* If excess == 0, no tree ops */
4720 		__mem_cgroup_insert_exceeded(mz->memcg, mz, mctz, excess);
4721 		spin_unlock(&mctz->lock);
4722 		css_put(&mz->memcg->css);
4723 		loop++;
4724 		/*
4725 		 * Could not reclaim anything and there are no more
4726 		 * mem cgroups to try or we seem to be looping without
4727 		 * reclaiming anything.
4728 		 */
4729 		if (!nr_reclaimed &&
4730 			(next_mz == NULL ||
4731 			loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS))
4732 			break;
4733 	} while (!nr_reclaimed);
4734 	if (next_mz)
4735 		css_put(&next_mz->memcg->css);
4736 	return nr_reclaimed;
4737 }
4738 
4739 /**
4740  * mem_cgroup_force_empty_list - clears LRU of a group
4741  * @memcg: group to clear
4742  * @node: NUMA node
4743  * @zid: zone id
4744  * @lru: lru to to clear
4745  *
4746  * Traverse a specified page_cgroup list and try to drop them all.  This doesn't
4747  * reclaim the pages page themselves - pages are moved to the parent (or root)
4748  * group.
4749  */
4750 static void mem_cgroup_force_empty_list(struct mem_cgroup *memcg,
4751 				int node, int zid, enum lru_list lru)
4752 {
4753 	struct lruvec *lruvec;
4754 	unsigned long flags;
4755 	struct list_head *list;
4756 	struct page *busy;
4757 	struct zone *zone;
4758 
4759 	zone = &NODE_DATA(node)->node_zones[zid];
4760 	lruvec = mem_cgroup_zone_lruvec(zone, memcg);
4761 	list = &lruvec->lists[lru];
4762 
4763 	busy = NULL;
4764 	do {
4765 		struct page_cgroup *pc;
4766 		struct page *page;
4767 
4768 		spin_lock_irqsave(&zone->lru_lock, flags);
4769 		if (list_empty(list)) {
4770 			spin_unlock_irqrestore(&zone->lru_lock, flags);
4771 			break;
4772 		}
4773 		page = list_entry(list->prev, struct page, lru);
4774 		if (busy == page) {
4775 			list_move(&page->lru, list);
4776 			busy = NULL;
4777 			spin_unlock_irqrestore(&zone->lru_lock, flags);
4778 			continue;
4779 		}
4780 		spin_unlock_irqrestore(&zone->lru_lock, flags);
4781 
4782 		pc = lookup_page_cgroup(page);
4783 
4784 		if (mem_cgroup_move_parent(page, pc, memcg)) {
4785 			/* found lock contention or "pc" is obsolete. */
4786 			busy = page;
4787 			cond_resched();
4788 		} else
4789 			busy = NULL;
4790 	} while (!list_empty(list));
4791 }
4792 
4793 /*
4794  * make mem_cgroup's charge to be 0 if there is no task by moving
4795  * all the charges and pages to the parent.
4796  * This enables deleting this mem_cgroup.
4797  *
4798  * Caller is responsible for holding css reference on the memcg.
4799  */
4800 static void mem_cgroup_reparent_charges(struct mem_cgroup *memcg)
4801 {
4802 	int node, zid;
4803 	u64 usage;
4804 
4805 	do {
4806 		/* This is for making all *used* pages to be on LRU. */
4807 		lru_add_drain_all();
4808 		drain_all_stock_sync(memcg);
4809 		mem_cgroup_start_move(memcg);
4810 		for_each_node_state(node, N_MEMORY) {
4811 			for (zid = 0; zid < MAX_NR_ZONES; zid++) {
4812 				enum lru_list lru;
4813 				for_each_lru(lru) {
4814 					mem_cgroup_force_empty_list(memcg,
4815 							node, zid, lru);
4816 				}
4817 			}
4818 		}
4819 		mem_cgroup_end_move(memcg);
4820 		memcg_oom_recover(memcg);
4821 		cond_resched();
4822 
4823 		/*
4824 		 * Kernel memory may not necessarily be trackable to a specific
4825 		 * process. So they are not migrated, and therefore we can't
4826 		 * expect their value to drop to 0 here.
4827 		 * Having res filled up with kmem only is enough.
4828 		 *
4829 		 * This is a safety check because mem_cgroup_force_empty_list
4830 		 * could have raced with mem_cgroup_replace_page_cache callers
4831 		 * so the lru seemed empty but the page could have been added
4832 		 * right after the check. RES_USAGE should be safe as we always
4833 		 * charge before adding to the LRU.
4834 		 */
4835 		usage = res_counter_read_u64(&memcg->res, RES_USAGE) -
4836 			res_counter_read_u64(&memcg->kmem, RES_USAGE);
4837 	} while (usage > 0);
4838 }
4839 
4840 static inline bool memcg_has_children(struct mem_cgroup *memcg)
4841 {
4842 	lockdep_assert_held(&memcg_create_mutex);
4843 	/*
4844 	 * The lock does not prevent addition or deletion to the list
4845 	 * of children, but it prevents a new child from being
4846 	 * initialized based on this parent in css_online(), so it's
4847 	 * enough to decide whether hierarchically inherited
4848 	 * attributes can still be changed or not.
4849 	 */
4850 	return memcg->use_hierarchy &&
4851 		!list_empty(&memcg->css.cgroup->children);
4852 }
4853 
4854 /*
4855  * Reclaims as many pages from the given memcg as possible and moves
4856  * the rest to the parent.
4857  *
4858  * Caller is responsible for holding css reference for memcg.
4859  */
4860 static int mem_cgroup_force_empty(struct mem_cgroup *memcg)
4861 {
4862 	int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
4863 	struct cgroup *cgrp = memcg->css.cgroup;
4864 
4865 	/* returns EBUSY if there is a task or if we come here twice. */
4866 	if (cgroup_has_tasks(cgrp) || !list_empty(&cgrp->children))
4867 		return -EBUSY;
4868 
4869 	/* we call try-to-free pages for make this cgroup empty */
4870 	lru_add_drain_all();
4871 	/* try to free all pages in this cgroup */
4872 	while (nr_retries && res_counter_read_u64(&memcg->res, RES_USAGE) > 0) {
4873 		int progress;
4874 
4875 		if (signal_pending(current))
4876 			return -EINTR;
4877 
4878 		progress = try_to_free_mem_cgroup_pages(memcg, GFP_KERNEL,
4879 						false);
4880 		if (!progress) {
4881 			nr_retries--;
4882 			/* maybe some writeback is necessary */
4883 			congestion_wait(BLK_RW_ASYNC, HZ/10);
4884 		}
4885 
4886 	}
4887 	lru_add_drain();
4888 	mem_cgroup_reparent_charges(memcg);
4889 
4890 	return 0;
4891 }
4892 
4893 static int mem_cgroup_force_empty_write(struct cgroup_subsys_state *css,
4894 					unsigned int event)
4895 {
4896 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4897 
4898 	if (mem_cgroup_is_root(memcg))
4899 		return -EINVAL;
4900 	return mem_cgroup_force_empty(memcg);
4901 }
4902 
4903 static u64 mem_cgroup_hierarchy_read(struct cgroup_subsys_state *css,
4904 				     struct cftype *cft)
4905 {
4906 	return mem_cgroup_from_css(css)->use_hierarchy;
4907 }
4908 
4909 static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css,
4910 				      struct cftype *cft, u64 val)
4911 {
4912 	int retval = 0;
4913 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4914 	struct mem_cgroup *parent_memcg = mem_cgroup_from_css(css_parent(&memcg->css));
4915 
4916 	mutex_lock(&memcg_create_mutex);
4917 
4918 	if (memcg->use_hierarchy == val)
4919 		goto out;
4920 
4921 	/*
4922 	 * If parent's use_hierarchy is set, we can't make any modifications
4923 	 * in the child subtrees. If it is unset, then the change can
4924 	 * occur, provided the current cgroup has no children.
4925 	 *
4926 	 * For the root cgroup, parent_mem is NULL, we allow value to be
4927 	 * set if there are no children.
4928 	 */
4929 	if ((!parent_memcg || !parent_memcg->use_hierarchy) &&
4930 				(val == 1 || val == 0)) {
4931 		if (list_empty(&memcg->css.cgroup->children))
4932 			memcg->use_hierarchy = val;
4933 		else
4934 			retval = -EBUSY;
4935 	} else
4936 		retval = -EINVAL;
4937 
4938 out:
4939 	mutex_unlock(&memcg_create_mutex);
4940 
4941 	return retval;
4942 }
4943 
4944 
4945 static unsigned long mem_cgroup_recursive_stat(struct mem_cgroup *memcg,
4946 					       enum mem_cgroup_stat_index idx)
4947 {
4948 	struct mem_cgroup *iter;
4949 	long val = 0;
4950 
4951 	/* Per-cpu values can be negative, use a signed accumulator */
4952 	for_each_mem_cgroup_tree(iter, memcg)
4953 		val += mem_cgroup_read_stat(iter, idx);
4954 
4955 	if (val < 0) /* race ? */
4956 		val = 0;
4957 	return val;
4958 }
4959 
4960 static inline u64 mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
4961 {
4962 	u64 val;
4963 
4964 	if (!mem_cgroup_is_root(memcg)) {
4965 		if (!swap)
4966 			return res_counter_read_u64(&memcg->res, RES_USAGE);
4967 		else
4968 			return res_counter_read_u64(&memcg->memsw, RES_USAGE);
4969 	}
4970 
4971 	/*
4972 	 * Transparent hugepages are still accounted for in MEM_CGROUP_STAT_RSS
4973 	 * as well as in MEM_CGROUP_STAT_RSS_HUGE.
4974 	 */
4975 	val = mem_cgroup_recursive_stat(memcg, MEM_CGROUP_STAT_CACHE);
4976 	val += mem_cgroup_recursive_stat(memcg, MEM_CGROUP_STAT_RSS);
4977 
4978 	if (swap)
4979 		val += mem_cgroup_recursive_stat(memcg, MEM_CGROUP_STAT_SWAP);
4980 
4981 	return val << PAGE_SHIFT;
4982 }
4983 
4984 static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css,
4985 				   struct cftype *cft)
4986 {
4987 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4988 	u64 val;
4989 	int name;
4990 	enum res_type type;
4991 
4992 	type = MEMFILE_TYPE(cft->private);
4993 	name = MEMFILE_ATTR(cft->private);
4994 
4995 	switch (type) {
4996 	case _MEM:
4997 		if (name == RES_USAGE)
4998 			val = mem_cgroup_usage(memcg, false);
4999 		else
5000 			val = res_counter_read_u64(&memcg->res, name);
5001 		break;
5002 	case _MEMSWAP:
5003 		if (name == RES_USAGE)
5004 			val = mem_cgroup_usage(memcg, true);
5005 		else
5006 			val = res_counter_read_u64(&memcg->memsw, name);
5007 		break;
5008 	case _KMEM:
5009 		val = res_counter_read_u64(&memcg->kmem, name);
5010 		break;
5011 	default:
5012 		BUG();
5013 	}
5014 
5015 	return val;
5016 }
5017 
5018 #ifdef CONFIG_MEMCG_KMEM
5019 /* should be called with activate_kmem_mutex held */
5020 static int __memcg_activate_kmem(struct mem_cgroup *memcg,
5021 				 unsigned long long limit)
5022 {
5023 	int err = 0;
5024 	int memcg_id;
5025 
5026 	if (memcg_kmem_is_active(memcg))
5027 		return 0;
5028 
5029 	/*
5030 	 * We are going to allocate memory for data shared by all memory
5031 	 * cgroups so let's stop accounting here.
5032 	 */
5033 	memcg_stop_kmem_account();
5034 
5035 	/*
5036 	 * For simplicity, we won't allow this to be disabled.  It also can't
5037 	 * be changed if the cgroup has children already, or if tasks had
5038 	 * already joined.
5039 	 *
5040 	 * If tasks join before we set the limit, a person looking at
5041 	 * kmem.usage_in_bytes will have no way to determine when it took
5042 	 * place, which makes the value quite meaningless.
5043 	 *
5044 	 * After it first became limited, changes in the value of the limit are
5045 	 * of course permitted.
5046 	 */
5047 	mutex_lock(&memcg_create_mutex);
5048 	if (cgroup_has_tasks(memcg->css.cgroup) || memcg_has_children(memcg))
5049 		err = -EBUSY;
5050 	mutex_unlock(&memcg_create_mutex);
5051 	if (err)
5052 		goto out;
5053 
5054 	memcg_id = ida_simple_get(&kmem_limited_groups,
5055 				  0, MEMCG_CACHES_MAX_SIZE, GFP_KERNEL);
5056 	if (memcg_id < 0) {
5057 		err = memcg_id;
5058 		goto out;
5059 	}
5060 
5061 	/*
5062 	 * Make sure we have enough space for this cgroup in each root cache's
5063 	 * memcg_params.
5064 	 */
5065 	err = memcg_update_all_caches(memcg_id + 1);
5066 	if (err)
5067 		goto out_rmid;
5068 
5069 	memcg->kmemcg_id = memcg_id;
5070 	INIT_LIST_HEAD(&memcg->memcg_slab_caches);
5071 	mutex_init(&memcg->slab_caches_mutex);
5072 
5073 	/*
5074 	 * We couldn't have accounted to this cgroup, because it hasn't got the
5075 	 * active bit set yet, so this should succeed.
5076 	 */
5077 	err = res_counter_set_limit(&memcg->kmem, limit);
5078 	VM_BUG_ON(err);
5079 
5080 	static_key_slow_inc(&memcg_kmem_enabled_key);
5081 	/*
5082 	 * Setting the active bit after enabling static branching will
5083 	 * guarantee no one starts accounting before all call sites are
5084 	 * patched.
5085 	 */
5086 	memcg_kmem_set_active(memcg);
5087 out:
5088 	memcg_resume_kmem_account();
5089 	return err;
5090 
5091 out_rmid:
5092 	ida_simple_remove(&kmem_limited_groups, memcg_id);
5093 	goto out;
5094 }
5095 
5096 static int memcg_activate_kmem(struct mem_cgroup *memcg,
5097 			       unsigned long long limit)
5098 {
5099 	int ret;
5100 
5101 	mutex_lock(&activate_kmem_mutex);
5102 	ret = __memcg_activate_kmem(memcg, limit);
5103 	mutex_unlock(&activate_kmem_mutex);
5104 	return ret;
5105 }
5106 
5107 static int memcg_update_kmem_limit(struct mem_cgroup *memcg,
5108 				   unsigned long long val)
5109 {
5110 	int ret;
5111 
5112 	if (!memcg_kmem_is_active(memcg))
5113 		ret = memcg_activate_kmem(memcg, val);
5114 	else
5115 		ret = res_counter_set_limit(&memcg->kmem, val);
5116 	return ret;
5117 }
5118 
5119 static int memcg_propagate_kmem(struct mem_cgroup *memcg)
5120 {
5121 	int ret = 0;
5122 	struct mem_cgroup *parent = parent_mem_cgroup(memcg);
5123 
5124 	if (!parent)
5125 		return 0;
5126 
5127 	mutex_lock(&activate_kmem_mutex);
5128 	/*
5129 	 * If the parent cgroup is not kmem-active now, it cannot be activated
5130 	 * after this point, because it has at least one child already.
5131 	 */
5132 	if (memcg_kmem_is_active(parent))
5133 		ret = __memcg_activate_kmem(memcg, RES_COUNTER_MAX);
5134 	mutex_unlock(&activate_kmem_mutex);
5135 	return ret;
5136 }
5137 #else
5138 static int memcg_update_kmem_limit(struct mem_cgroup *memcg,
5139 				   unsigned long long val)
5140 {
5141 	return -EINVAL;
5142 }
5143 #endif /* CONFIG_MEMCG_KMEM */
5144 
5145 /*
5146  * The user of this function is...
5147  * RES_LIMIT.
5148  */
5149 static int mem_cgroup_write(struct cgroup_subsys_state *css, struct cftype *cft,
5150 			    char *buffer)
5151 {
5152 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5153 	enum res_type type;
5154 	int name;
5155 	unsigned long long val;
5156 	int ret;
5157 
5158 	type = MEMFILE_TYPE(cft->private);
5159 	name = MEMFILE_ATTR(cft->private);
5160 
5161 	switch (name) {
5162 	case RES_LIMIT:
5163 		if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */
5164 			ret = -EINVAL;
5165 			break;
5166 		}
5167 		/* This function does all necessary parse...reuse it */
5168 		ret = res_counter_memparse_write_strategy(buffer, &val);
5169 		if (ret)
5170 			break;
5171 		if (type == _MEM)
5172 			ret = mem_cgroup_resize_limit(memcg, val);
5173 		else if (type == _MEMSWAP)
5174 			ret = mem_cgroup_resize_memsw_limit(memcg, val);
5175 		else if (type == _KMEM)
5176 			ret = memcg_update_kmem_limit(memcg, val);
5177 		else
5178 			return -EINVAL;
5179 		break;
5180 	case RES_SOFT_LIMIT:
5181 		ret = res_counter_memparse_write_strategy(buffer, &val);
5182 		if (ret)
5183 			break;
5184 		/*
5185 		 * For memsw, soft limits are hard to implement in terms
5186 		 * of semantics, for now, we support soft limits for
5187 		 * control without swap
5188 		 */
5189 		if (type == _MEM)
5190 			ret = res_counter_set_soft_limit(&memcg->res, val);
5191 		else
5192 			ret = -EINVAL;
5193 		break;
5194 	default:
5195 		ret = -EINVAL; /* should be BUG() ? */
5196 		break;
5197 	}
5198 	return ret;
5199 }
5200 
5201 static void memcg_get_hierarchical_limit(struct mem_cgroup *memcg,
5202 		unsigned long long *mem_limit, unsigned long long *memsw_limit)
5203 {
5204 	unsigned long long min_limit, min_memsw_limit, tmp;
5205 
5206 	min_limit = res_counter_read_u64(&memcg->res, RES_LIMIT);
5207 	min_memsw_limit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
5208 	if (!memcg->use_hierarchy)
5209 		goto out;
5210 
5211 	while (css_parent(&memcg->css)) {
5212 		memcg = mem_cgroup_from_css(css_parent(&memcg->css));
5213 		if (!memcg->use_hierarchy)
5214 			break;
5215 		tmp = res_counter_read_u64(&memcg->res, RES_LIMIT);
5216 		min_limit = min(min_limit, tmp);
5217 		tmp = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
5218 		min_memsw_limit = min(min_memsw_limit, tmp);
5219 	}
5220 out:
5221 	*mem_limit = min_limit;
5222 	*memsw_limit = min_memsw_limit;
5223 }
5224 
5225 static int mem_cgroup_reset(struct cgroup_subsys_state *css, unsigned int event)
5226 {
5227 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5228 	int name;
5229 	enum res_type type;
5230 
5231 	type = MEMFILE_TYPE(event);
5232 	name = MEMFILE_ATTR(event);
5233 
5234 	switch (name) {
5235 	case RES_MAX_USAGE:
5236 		if (type == _MEM)
5237 			res_counter_reset_max(&memcg->res);
5238 		else if (type == _MEMSWAP)
5239 			res_counter_reset_max(&memcg->memsw);
5240 		else if (type == _KMEM)
5241 			res_counter_reset_max(&memcg->kmem);
5242 		else
5243 			return -EINVAL;
5244 		break;
5245 	case RES_FAILCNT:
5246 		if (type == _MEM)
5247 			res_counter_reset_failcnt(&memcg->res);
5248 		else if (type == _MEMSWAP)
5249 			res_counter_reset_failcnt(&memcg->memsw);
5250 		else if (type == _KMEM)
5251 			res_counter_reset_failcnt(&memcg->kmem);
5252 		else
5253 			return -EINVAL;
5254 		break;
5255 	}
5256 
5257 	return 0;
5258 }
5259 
5260 static u64 mem_cgroup_move_charge_read(struct cgroup_subsys_state *css,
5261 					struct cftype *cft)
5262 {
5263 	return mem_cgroup_from_css(css)->move_charge_at_immigrate;
5264 }
5265 
5266 #ifdef CONFIG_MMU
5267 static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
5268 					struct cftype *cft, u64 val)
5269 {
5270 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5271 
5272 	if (val >= (1 << NR_MOVE_TYPE))
5273 		return -EINVAL;
5274 
5275 	/*
5276 	 * No kind of locking is needed in here, because ->can_attach() will
5277 	 * check this value once in the beginning of the process, and then carry
5278 	 * on with stale data. This means that changes to this value will only
5279 	 * affect task migrations starting after the change.
5280 	 */
5281 	memcg->move_charge_at_immigrate = val;
5282 	return 0;
5283 }
5284 #else
5285 static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
5286 					struct cftype *cft, u64 val)
5287 {
5288 	return -ENOSYS;
5289 }
5290 #endif
5291 
5292 #ifdef CONFIG_NUMA
5293 static int memcg_numa_stat_show(struct seq_file *m, void *v)
5294 {
5295 	struct numa_stat {
5296 		const char *name;
5297 		unsigned int lru_mask;
5298 	};
5299 
5300 	static const struct numa_stat stats[] = {
5301 		{ "total", LRU_ALL },
5302 		{ "file", LRU_ALL_FILE },
5303 		{ "anon", LRU_ALL_ANON },
5304 		{ "unevictable", BIT(LRU_UNEVICTABLE) },
5305 	};
5306 	const struct numa_stat *stat;
5307 	int nid;
5308 	unsigned long nr;
5309 	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
5310 
5311 	for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
5312 		nr = mem_cgroup_nr_lru_pages(memcg, stat->lru_mask);
5313 		seq_printf(m, "%s=%lu", stat->name, nr);
5314 		for_each_node_state(nid, N_MEMORY) {
5315 			nr = mem_cgroup_node_nr_lru_pages(memcg, nid,
5316 							  stat->lru_mask);
5317 			seq_printf(m, " N%d=%lu", nid, nr);
5318 		}
5319 		seq_putc(m, '\n');
5320 	}
5321 
5322 	for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
5323 		struct mem_cgroup *iter;
5324 
5325 		nr = 0;
5326 		for_each_mem_cgroup_tree(iter, memcg)
5327 			nr += mem_cgroup_nr_lru_pages(iter, stat->lru_mask);
5328 		seq_printf(m, "hierarchical_%s=%lu", stat->name, nr);
5329 		for_each_node_state(nid, N_MEMORY) {
5330 			nr = 0;
5331 			for_each_mem_cgroup_tree(iter, memcg)
5332 				nr += mem_cgroup_node_nr_lru_pages(
5333 					iter, nid, stat->lru_mask);
5334 			seq_printf(m, " N%d=%lu", nid, nr);
5335 		}
5336 		seq_putc(m, '\n');
5337 	}
5338 
5339 	return 0;
5340 }
5341 #endif /* CONFIG_NUMA */
5342 
5343 static inline void mem_cgroup_lru_names_not_uptodate(void)
5344 {
5345 	BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_lru_names) != NR_LRU_LISTS);
5346 }
5347 
5348 static int memcg_stat_show(struct seq_file *m, void *v)
5349 {
5350 	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
5351 	struct mem_cgroup *mi;
5352 	unsigned int i;
5353 
5354 	for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
5355 		if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account)
5356 			continue;
5357 		seq_printf(m, "%s %ld\n", mem_cgroup_stat_names[i],
5358 			   mem_cgroup_read_stat(memcg, i) * PAGE_SIZE);
5359 	}
5360 
5361 	for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++)
5362 		seq_printf(m, "%s %lu\n", mem_cgroup_events_names[i],
5363 			   mem_cgroup_read_events(memcg, i));
5364 
5365 	for (i = 0; i < NR_LRU_LISTS; i++)
5366 		seq_printf(m, "%s %lu\n", mem_cgroup_lru_names[i],
5367 			   mem_cgroup_nr_lru_pages(memcg, BIT(i)) * PAGE_SIZE);
5368 
5369 	/* Hierarchical information */
5370 	{
5371 		unsigned long long limit, memsw_limit;
5372 		memcg_get_hierarchical_limit(memcg, &limit, &memsw_limit);
5373 		seq_printf(m, "hierarchical_memory_limit %llu\n", limit);
5374 		if (do_swap_account)
5375 			seq_printf(m, "hierarchical_memsw_limit %llu\n",
5376 				   memsw_limit);
5377 	}
5378 
5379 	for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
5380 		long long val = 0;
5381 
5382 		if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account)
5383 			continue;
5384 		for_each_mem_cgroup_tree(mi, memcg)
5385 			val += mem_cgroup_read_stat(mi, i) * PAGE_SIZE;
5386 		seq_printf(m, "total_%s %lld\n", mem_cgroup_stat_names[i], val);
5387 	}
5388 
5389 	for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) {
5390 		unsigned long long val = 0;
5391 
5392 		for_each_mem_cgroup_tree(mi, memcg)
5393 			val += mem_cgroup_read_events(mi, i);
5394 		seq_printf(m, "total_%s %llu\n",
5395 			   mem_cgroup_events_names[i], val);
5396 	}
5397 
5398 	for (i = 0; i < NR_LRU_LISTS; i++) {
5399 		unsigned long long val = 0;
5400 
5401 		for_each_mem_cgroup_tree(mi, memcg)
5402 			val += mem_cgroup_nr_lru_pages(mi, BIT(i)) * PAGE_SIZE;
5403 		seq_printf(m, "total_%s %llu\n", mem_cgroup_lru_names[i], val);
5404 	}
5405 
5406 #ifdef CONFIG_DEBUG_VM
5407 	{
5408 		int nid, zid;
5409 		struct mem_cgroup_per_zone *mz;
5410 		struct zone_reclaim_stat *rstat;
5411 		unsigned long recent_rotated[2] = {0, 0};
5412 		unsigned long recent_scanned[2] = {0, 0};
5413 
5414 		for_each_online_node(nid)
5415 			for (zid = 0; zid < MAX_NR_ZONES; zid++) {
5416 				mz = mem_cgroup_zoneinfo(memcg, nid, zid);
5417 				rstat = &mz->lruvec.reclaim_stat;
5418 
5419 				recent_rotated[0] += rstat->recent_rotated[0];
5420 				recent_rotated[1] += rstat->recent_rotated[1];
5421 				recent_scanned[0] += rstat->recent_scanned[0];
5422 				recent_scanned[1] += rstat->recent_scanned[1];
5423 			}
5424 		seq_printf(m, "recent_rotated_anon %lu\n", recent_rotated[0]);
5425 		seq_printf(m, "recent_rotated_file %lu\n", recent_rotated[1]);
5426 		seq_printf(m, "recent_scanned_anon %lu\n", recent_scanned[0]);
5427 		seq_printf(m, "recent_scanned_file %lu\n", recent_scanned[1]);
5428 	}
5429 #endif
5430 
5431 	return 0;
5432 }
5433 
5434 static u64 mem_cgroup_swappiness_read(struct cgroup_subsys_state *css,
5435 				      struct cftype *cft)
5436 {
5437 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5438 
5439 	return mem_cgroup_swappiness(memcg);
5440 }
5441 
5442 static int mem_cgroup_swappiness_write(struct cgroup_subsys_state *css,
5443 				       struct cftype *cft, u64 val)
5444 {
5445 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5446 	struct mem_cgroup *parent = mem_cgroup_from_css(css_parent(&memcg->css));
5447 
5448 	if (val > 100 || !parent)
5449 		return -EINVAL;
5450 
5451 	mutex_lock(&memcg_create_mutex);
5452 
5453 	/* If under hierarchy, only empty-root can set this value */
5454 	if ((parent->use_hierarchy) || memcg_has_children(memcg)) {
5455 		mutex_unlock(&memcg_create_mutex);
5456 		return -EINVAL;
5457 	}
5458 
5459 	memcg->swappiness = val;
5460 
5461 	mutex_unlock(&memcg_create_mutex);
5462 
5463 	return 0;
5464 }
5465 
5466 static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
5467 {
5468 	struct mem_cgroup_threshold_ary *t;
5469 	u64 usage;
5470 	int i;
5471 
5472 	rcu_read_lock();
5473 	if (!swap)
5474 		t = rcu_dereference(memcg->thresholds.primary);
5475 	else
5476 		t = rcu_dereference(memcg->memsw_thresholds.primary);
5477 
5478 	if (!t)
5479 		goto unlock;
5480 
5481 	usage = mem_cgroup_usage(memcg, swap);
5482 
5483 	/*
5484 	 * current_threshold points to threshold just below or equal to usage.
5485 	 * If it's not true, a threshold was crossed after last
5486 	 * call of __mem_cgroup_threshold().
5487 	 */
5488 	i = t->current_threshold;
5489 
5490 	/*
5491 	 * Iterate backward over array of thresholds starting from
5492 	 * current_threshold and check if a threshold is crossed.
5493 	 * If none of thresholds below usage is crossed, we read
5494 	 * only one element of the array here.
5495 	 */
5496 	for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--)
5497 		eventfd_signal(t->entries[i].eventfd, 1);
5498 
5499 	/* i = current_threshold + 1 */
5500 	i++;
5501 
5502 	/*
5503 	 * Iterate forward over array of thresholds starting from
5504 	 * current_threshold+1 and check if a threshold is crossed.
5505 	 * If none of thresholds above usage is crossed, we read
5506 	 * only one element of the array here.
5507 	 */
5508 	for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++)
5509 		eventfd_signal(t->entries[i].eventfd, 1);
5510 
5511 	/* Update current_threshold */
5512 	t->current_threshold = i - 1;
5513 unlock:
5514 	rcu_read_unlock();
5515 }
5516 
5517 static void mem_cgroup_threshold(struct mem_cgroup *memcg)
5518 {
5519 	while (memcg) {
5520 		__mem_cgroup_threshold(memcg, false);
5521 		if (do_swap_account)
5522 			__mem_cgroup_threshold(memcg, true);
5523 
5524 		memcg = parent_mem_cgroup(memcg);
5525 	}
5526 }
5527 
5528 static int compare_thresholds(const void *a, const void *b)
5529 {
5530 	const struct mem_cgroup_threshold *_a = a;
5531 	const struct mem_cgroup_threshold *_b = b;
5532 
5533 	if (_a->threshold > _b->threshold)
5534 		return 1;
5535 
5536 	if (_a->threshold < _b->threshold)
5537 		return -1;
5538 
5539 	return 0;
5540 }
5541 
5542 static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg)
5543 {
5544 	struct mem_cgroup_eventfd_list *ev;
5545 
5546 	list_for_each_entry(ev, &memcg->oom_notify, list)
5547 		eventfd_signal(ev->eventfd, 1);
5548 	return 0;
5549 }
5550 
5551 static void mem_cgroup_oom_notify(struct mem_cgroup *memcg)
5552 {
5553 	struct mem_cgroup *iter;
5554 
5555 	for_each_mem_cgroup_tree(iter, memcg)
5556 		mem_cgroup_oom_notify_cb(iter);
5557 }
5558 
5559 static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
5560 	struct eventfd_ctx *eventfd, const char *args, enum res_type type)
5561 {
5562 	struct mem_cgroup_thresholds *thresholds;
5563 	struct mem_cgroup_threshold_ary *new;
5564 	u64 threshold, usage;
5565 	int i, size, ret;
5566 
5567 	ret = res_counter_memparse_write_strategy(args, &threshold);
5568 	if (ret)
5569 		return ret;
5570 
5571 	mutex_lock(&memcg->thresholds_lock);
5572 
5573 	if (type == _MEM)
5574 		thresholds = &memcg->thresholds;
5575 	else if (type == _MEMSWAP)
5576 		thresholds = &memcg->memsw_thresholds;
5577 	else
5578 		BUG();
5579 
5580 	usage = mem_cgroup_usage(memcg, type == _MEMSWAP);
5581 
5582 	/* Check if a threshold crossed before adding a new one */
5583 	if (thresholds->primary)
5584 		__mem_cgroup_threshold(memcg, type == _MEMSWAP);
5585 
5586 	size = thresholds->primary ? thresholds->primary->size + 1 : 1;
5587 
5588 	/* Allocate memory for new array of thresholds */
5589 	new = kmalloc(sizeof(*new) + size * sizeof(struct mem_cgroup_threshold),
5590 			GFP_KERNEL);
5591 	if (!new) {
5592 		ret = -ENOMEM;
5593 		goto unlock;
5594 	}
5595 	new->size = size;
5596 
5597 	/* Copy thresholds (if any) to new array */
5598 	if (thresholds->primary) {
5599 		memcpy(new->entries, thresholds->primary->entries, (size - 1) *
5600 				sizeof(struct mem_cgroup_threshold));
5601 	}
5602 
5603 	/* Add new threshold */
5604 	new->entries[size - 1].eventfd = eventfd;
5605 	new->entries[size - 1].threshold = threshold;
5606 
5607 	/* Sort thresholds. Registering of new threshold isn't time-critical */
5608 	sort(new->entries, size, sizeof(struct mem_cgroup_threshold),
5609 			compare_thresholds, NULL);
5610 
5611 	/* Find current threshold */
5612 	new->current_threshold = -1;
5613 	for (i = 0; i < size; i++) {
5614 		if (new->entries[i].threshold <= usage) {
5615 			/*
5616 			 * new->current_threshold will not be used until
5617 			 * rcu_assign_pointer(), so it's safe to increment
5618 			 * it here.
5619 			 */
5620 			++new->current_threshold;
5621 		} else
5622 			break;
5623 	}
5624 
5625 	/* Free old spare buffer and save old primary buffer as spare */
5626 	kfree(thresholds->spare);
5627 	thresholds->spare = thresholds->primary;
5628 
5629 	rcu_assign_pointer(thresholds->primary, new);
5630 
5631 	/* To be sure that nobody uses thresholds */
5632 	synchronize_rcu();
5633 
5634 unlock:
5635 	mutex_unlock(&memcg->thresholds_lock);
5636 
5637 	return ret;
5638 }
5639 
5640 static int mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
5641 	struct eventfd_ctx *eventfd, const char *args)
5642 {
5643 	return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEM);
5644 }
5645 
5646 static int memsw_cgroup_usage_register_event(struct mem_cgroup *memcg,
5647 	struct eventfd_ctx *eventfd, const char *args)
5648 {
5649 	return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEMSWAP);
5650 }
5651 
5652 static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
5653 	struct eventfd_ctx *eventfd, enum res_type type)
5654 {
5655 	struct mem_cgroup_thresholds *thresholds;
5656 	struct mem_cgroup_threshold_ary *new;
5657 	u64 usage;
5658 	int i, j, size;
5659 
5660 	mutex_lock(&memcg->thresholds_lock);
5661 	if (type == _MEM)
5662 		thresholds = &memcg->thresholds;
5663 	else if (type == _MEMSWAP)
5664 		thresholds = &memcg->memsw_thresholds;
5665 	else
5666 		BUG();
5667 
5668 	if (!thresholds->primary)
5669 		goto unlock;
5670 
5671 	usage = mem_cgroup_usage(memcg, type == _MEMSWAP);
5672 
5673 	/* Check if a threshold crossed before removing */
5674 	__mem_cgroup_threshold(memcg, type == _MEMSWAP);
5675 
5676 	/* Calculate new number of threshold */
5677 	size = 0;
5678 	for (i = 0; i < thresholds->primary->size; i++) {
5679 		if (thresholds->primary->entries[i].eventfd != eventfd)
5680 			size++;
5681 	}
5682 
5683 	new = thresholds->spare;
5684 
5685 	/* Set thresholds array to NULL if we don't have thresholds */
5686 	if (!size) {
5687 		kfree(new);
5688 		new = NULL;
5689 		goto swap_buffers;
5690 	}
5691 
5692 	new->size = size;
5693 
5694 	/* Copy thresholds and find current threshold */
5695 	new->current_threshold = -1;
5696 	for (i = 0, j = 0; i < thresholds->primary->size; i++) {
5697 		if (thresholds->primary->entries[i].eventfd == eventfd)
5698 			continue;
5699 
5700 		new->entries[j] = thresholds->primary->entries[i];
5701 		if (new->entries[j].threshold <= usage) {
5702 			/*
5703 			 * new->current_threshold will not be used
5704 			 * until rcu_assign_pointer(), so it's safe to increment
5705 			 * it here.
5706 			 */
5707 			++new->current_threshold;
5708 		}
5709 		j++;
5710 	}
5711 
5712 swap_buffers:
5713 	/* Swap primary and spare array */
5714 	thresholds->spare = thresholds->primary;
5715 	/* If all events are unregistered, free the spare array */
5716 	if (!new) {
5717 		kfree(thresholds->spare);
5718 		thresholds->spare = NULL;
5719 	}
5720 
5721 	rcu_assign_pointer(thresholds->primary, new);
5722 
5723 	/* To be sure that nobody uses thresholds */
5724 	synchronize_rcu();
5725 unlock:
5726 	mutex_unlock(&memcg->thresholds_lock);
5727 }
5728 
5729 static void mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
5730 	struct eventfd_ctx *eventfd)
5731 {
5732 	return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEM);
5733 }
5734 
5735 static void memsw_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
5736 	struct eventfd_ctx *eventfd)
5737 {
5738 	return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEMSWAP);
5739 }
5740 
5741 static int mem_cgroup_oom_register_event(struct mem_cgroup *memcg,
5742 	struct eventfd_ctx *eventfd, const char *args)
5743 {
5744 	struct mem_cgroup_eventfd_list *event;
5745 
5746 	event = kmalloc(sizeof(*event),	GFP_KERNEL);
5747 	if (!event)
5748 		return -ENOMEM;
5749 
5750 	spin_lock(&memcg_oom_lock);
5751 
5752 	event->eventfd = eventfd;
5753 	list_add(&event->list, &memcg->oom_notify);
5754 
5755 	/* already in OOM ? */
5756 	if (atomic_read(&memcg->under_oom))
5757 		eventfd_signal(eventfd, 1);
5758 	spin_unlock(&memcg_oom_lock);
5759 
5760 	return 0;
5761 }
5762 
5763 static void mem_cgroup_oom_unregister_event(struct mem_cgroup *memcg,
5764 	struct eventfd_ctx *eventfd)
5765 {
5766 	struct mem_cgroup_eventfd_list *ev, *tmp;
5767 
5768 	spin_lock(&memcg_oom_lock);
5769 
5770 	list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) {
5771 		if (ev->eventfd == eventfd) {
5772 			list_del(&ev->list);
5773 			kfree(ev);
5774 		}
5775 	}
5776 
5777 	spin_unlock(&memcg_oom_lock);
5778 }
5779 
5780 static int mem_cgroup_oom_control_read(struct seq_file *sf, void *v)
5781 {
5782 	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(sf));
5783 
5784 	seq_printf(sf, "oom_kill_disable %d\n", memcg->oom_kill_disable);
5785 	seq_printf(sf, "under_oom %d\n", (bool)atomic_read(&memcg->under_oom));
5786 	return 0;
5787 }
5788 
5789 static int mem_cgroup_oom_control_write(struct cgroup_subsys_state *css,
5790 	struct cftype *cft, u64 val)
5791 {
5792 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5793 	struct mem_cgroup *parent = mem_cgroup_from_css(css_parent(&memcg->css));
5794 
5795 	/* cannot set to root cgroup and only 0 and 1 are allowed */
5796 	if (!parent || !((val == 0) || (val == 1)))
5797 		return -EINVAL;
5798 
5799 	mutex_lock(&memcg_create_mutex);
5800 	/* oom-kill-disable is a flag for subhierarchy. */
5801 	if ((parent->use_hierarchy) || memcg_has_children(memcg)) {
5802 		mutex_unlock(&memcg_create_mutex);
5803 		return -EINVAL;
5804 	}
5805 	memcg->oom_kill_disable = val;
5806 	if (!val)
5807 		memcg_oom_recover(memcg);
5808 	mutex_unlock(&memcg_create_mutex);
5809 	return 0;
5810 }
5811 
5812 #ifdef CONFIG_MEMCG_KMEM
5813 static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
5814 {
5815 	int ret;
5816 
5817 	memcg->kmemcg_id = -1;
5818 	ret = memcg_propagate_kmem(memcg);
5819 	if (ret)
5820 		return ret;
5821 
5822 	return mem_cgroup_sockets_init(memcg, ss);
5823 }
5824 
5825 static void memcg_destroy_kmem(struct mem_cgroup *memcg)
5826 {
5827 	mem_cgroup_sockets_destroy(memcg);
5828 }
5829 
5830 static void kmem_cgroup_css_offline(struct mem_cgroup *memcg)
5831 {
5832 	if (!memcg_kmem_is_active(memcg))
5833 		return;
5834 
5835 	/*
5836 	 * kmem charges can outlive the cgroup. In the case of slab
5837 	 * pages, for instance, a page contain objects from various
5838 	 * processes. As we prevent from taking a reference for every
5839 	 * such allocation we have to be careful when doing uncharge
5840 	 * (see memcg_uncharge_kmem) and here during offlining.
5841 	 *
5842 	 * The idea is that that only the _last_ uncharge which sees
5843 	 * the dead memcg will drop the last reference. An additional
5844 	 * reference is taken here before the group is marked dead
5845 	 * which is then paired with css_put during uncharge resp. here.
5846 	 *
5847 	 * Although this might sound strange as this path is called from
5848 	 * css_offline() when the referencemight have dropped down to 0
5849 	 * and shouldn't be incremented anymore (css_tryget would fail)
5850 	 * we do not have other options because of the kmem allocations
5851 	 * lifetime.
5852 	 */
5853 	css_get(&memcg->css);
5854 
5855 	memcg_kmem_mark_dead(memcg);
5856 
5857 	if (res_counter_read_u64(&memcg->kmem, RES_USAGE) != 0)
5858 		return;
5859 
5860 	if (memcg_kmem_test_and_clear_dead(memcg))
5861 		css_put(&memcg->css);
5862 }
5863 #else
5864 static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
5865 {
5866 	return 0;
5867 }
5868 
5869 static void memcg_destroy_kmem(struct mem_cgroup *memcg)
5870 {
5871 }
5872 
5873 static void kmem_cgroup_css_offline(struct mem_cgroup *memcg)
5874 {
5875 }
5876 #endif
5877 
5878 /*
5879  * DO NOT USE IN NEW FILES.
5880  *
5881  * "cgroup.event_control" implementation.
5882  *
5883  * This is way over-engineered.  It tries to support fully configurable
5884  * events for each user.  Such level of flexibility is completely
5885  * unnecessary especially in the light of the planned unified hierarchy.
5886  *
5887  * Please deprecate this and replace with something simpler if at all
5888  * possible.
5889  */
5890 
5891 /*
5892  * Unregister event and free resources.
5893  *
5894  * Gets called from workqueue.
5895  */
5896 static void memcg_event_remove(struct work_struct *work)
5897 {
5898 	struct mem_cgroup_event *event =
5899 		container_of(work, struct mem_cgroup_event, remove);
5900 	struct mem_cgroup *memcg = event->memcg;
5901 
5902 	remove_wait_queue(event->wqh, &event->wait);
5903 
5904 	event->unregister_event(memcg, event->eventfd);
5905 
5906 	/* Notify userspace the event is going away. */
5907 	eventfd_signal(event->eventfd, 1);
5908 
5909 	eventfd_ctx_put(event->eventfd);
5910 	kfree(event);
5911 	css_put(&memcg->css);
5912 }
5913 
5914 /*
5915  * Gets called on POLLHUP on eventfd when user closes it.
5916  *
5917  * Called with wqh->lock held and interrupts disabled.
5918  */
5919 static int memcg_event_wake(wait_queue_t *wait, unsigned mode,
5920 			    int sync, void *key)
5921 {
5922 	struct mem_cgroup_event *event =
5923 		container_of(wait, struct mem_cgroup_event, wait);
5924 	struct mem_cgroup *memcg = event->memcg;
5925 	unsigned long flags = (unsigned long)key;
5926 
5927 	if (flags & POLLHUP) {
5928 		/*
5929 		 * If the event has been detached at cgroup removal, we
5930 		 * can simply return knowing the other side will cleanup
5931 		 * for us.
5932 		 *
5933 		 * We can't race against event freeing since the other
5934 		 * side will require wqh->lock via remove_wait_queue(),
5935 		 * which we hold.
5936 		 */
5937 		spin_lock(&memcg->event_list_lock);
5938 		if (!list_empty(&event->list)) {
5939 			list_del_init(&event->list);
5940 			/*
5941 			 * We are in atomic context, but cgroup_event_remove()
5942 			 * may sleep, so we have to call it in workqueue.
5943 			 */
5944 			schedule_work(&event->remove);
5945 		}
5946 		spin_unlock(&memcg->event_list_lock);
5947 	}
5948 
5949 	return 0;
5950 }
5951 
5952 static void memcg_event_ptable_queue_proc(struct file *file,
5953 		wait_queue_head_t *wqh, poll_table *pt)
5954 {
5955 	struct mem_cgroup_event *event =
5956 		container_of(pt, struct mem_cgroup_event, pt);
5957 
5958 	event->wqh = wqh;
5959 	add_wait_queue(wqh, &event->wait);
5960 }
5961 
5962 /*
5963  * DO NOT USE IN NEW FILES.
5964  *
5965  * Parse input and register new cgroup event handler.
5966  *
5967  * Input must be in format '<event_fd> <control_fd> <args>'.
5968  * Interpretation of args is defined by control file implementation.
5969  */
5970 static int memcg_write_event_control(struct cgroup_subsys_state *css,
5971 				     struct cftype *cft, char *buffer)
5972 {
5973 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5974 	struct mem_cgroup_event *event;
5975 	struct cgroup_subsys_state *cfile_css;
5976 	unsigned int efd, cfd;
5977 	struct fd efile;
5978 	struct fd cfile;
5979 	const char *name;
5980 	char *endp;
5981 	int ret;
5982 
5983 	efd = simple_strtoul(buffer, &endp, 10);
5984 	if (*endp != ' ')
5985 		return -EINVAL;
5986 	buffer = endp + 1;
5987 
5988 	cfd = simple_strtoul(buffer, &endp, 10);
5989 	if ((*endp != ' ') && (*endp != '\0'))
5990 		return -EINVAL;
5991 	buffer = endp + 1;
5992 
5993 	event = kzalloc(sizeof(*event), GFP_KERNEL);
5994 	if (!event)
5995 		return -ENOMEM;
5996 
5997 	event->memcg = memcg;
5998 	INIT_LIST_HEAD(&event->list);
5999 	init_poll_funcptr(&event->pt, memcg_event_ptable_queue_proc);
6000 	init_waitqueue_func_entry(&event->wait, memcg_event_wake);
6001 	INIT_WORK(&event->remove, memcg_event_remove);
6002 
6003 	efile = fdget(efd);
6004 	if (!efile.file) {
6005 		ret = -EBADF;
6006 		goto out_kfree;
6007 	}
6008 
6009 	event->eventfd = eventfd_ctx_fileget(efile.file);
6010 	if (IS_ERR(event->eventfd)) {
6011 		ret = PTR_ERR(event->eventfd);
6012 		goto out_put_efile;
6013 	}
6014 
6015 	cfile = fdget(cfd);
6016 	if (!cfile.file) {
6017 		ret = -EBADF;
6018 		goto out_put_eventfd;
6019 	}
6020 
6021 	/* the process need read permission on control file */
6022 	/* AV: shouldn't we check that it's been opened for read instead? */
6023 	ret = inode_permission(file_inode(cfile.file), MAY_READ);
6024 	if (ret < 0)
6025 		goto out_put_cfile;
6026 
6027 	/*
6028 	 * Determine the event callbacks and set them in @event.  This used
6029 	 * to be done via struct cftype but cgroup core no longer knows
6030 	 * about these events.  The following is crude but the whole thing
6031 	 * is for compatibility anyway.
6032 	 *
6033 	 * DO NOT ADD NEW FILES.
6034 	 */
6035 	name = cfile.file->f_dentry->d_name.name;
6036 
6037 	if (!strcmp(name, "memory.usage_in_bytes")) {
6038 		event->register_event = mem_cgroup_usage_register_event;
6039 		event->unregister_event = mem_cgroup_usage_unregister_event;
6040 	} else if (!strcmp(name, "memory.oom_control")) {
6041 		event->register_event = mem_cgroup_oom_register_event;
6042 		event->unregister_event = mem_cgroup_oom_unregister_event;
6043 	} else if (!strcmp(name, "memory.pressure_level")) {
6044 		event->register_event = vmpressure_register_event;
6045 		event->unregister_event = vmpressure_unregister_event;
6046 	} else if (!strcmp(name, "memory.memsw.usage_in_bytes")) {
6047 		event->register_event = memsw_cgroup_usage_register_event;
6048 		event->unregister_event = memsw_cgroup_usage_unregister_event;
6049 	} else {
6050 		ret = -EINVAL;
6051 		goto out_put_cfile;
6052 	}
6053 
6054 	/*
6055 	 * Verify @cfile should belong to @css.  Also, remaining events are
6056 	 * automatically removed on cgroup destruction but the removal is
6057 	 * asynchronous, so take an extra ref on @css.
6058 	 */
6059 	cfile_css = css_tryget_from_dir(cfile.file->f_dentry->d_parent,
6060 					&memory_cgrp_subsys);
6061 	ret = -EINVAL;
6062 	if (IS_ERR(cfile_css))
6063 		goto out_put_cfile;
6064 	if (cfile_css != css) {
6065 		css_put(cfile_css);
6066 		goto out_put_cfile;
6067 	}
6068 
6069 	ret = event->register_event(memcg, event->eventfd, buffer);
6070 	if (ret)
6071 		goto out_put_css;
6072 
6073 	efile.file->f_op->poll(efile.file, &event->pt);
6074 
6075 	spin_lock(&memcg->event_list_lock);
6076 	list_add(&event->list, &memcg->event_list);
6077 	spin_unlock(&memcg->event_list_lock);
6078 
6079 	fdput(cfile);
6080 	fdput(efile);
6081 
6082 	return 0;
6083 
6084 out_put_css:
6085 	css_put(css);
6086 out_put_cfile:
6087 	fdput(cfile);
6088 out_put_eventfd:
6089 	eventfd_ctx_put(event->eventfd);
6090 out_put_efile:
6091 	fdput(efile);
6092 out_kfree:
6093 	kfree(event);
6094 
6095 	return ret;
6096 }
6097 
6098 static struct cftype mem_cgroup_files[] = {
6099 	{
6100 		.name = "usage_in_bytes",
6101 		.private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
6102 		.read_u64 = mem_cgroup_read_u64,
6103 	},
6104 	{
6105 		.name = "max_usage_in_bytes",
6106 		.private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
6107 		.trigger = mem_cgroup_reset,
6108 		.read_u64 = mem_cgroup_read_u64,
6109 	},
6110 	{
6111 		.name = "limit_in_bytes",
6112 		.private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
6113 		.write_string = mem_cgroup_write,
6114 		.read_u64 = mem_cgroup_read_u64,
6115 	},
6116 	{
6117 		.name = "soft_limit_in_bytes",
6118 		.private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT),
6119 		.write_string = mem_cgroup_write,
6120 		.read_u64 = mem_cgroup_read_u64,
6121 	},
6122 	{
6123 		.name = "failcnt",
6124 		.private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
6125 		.trigger = mem_cgroup_reset,
6126 		.read_u64 = mem_cgroup_read_u64,
6127 	},
6128 	{
6129 		.name = "stat",
6130 		.seq_show = memcg_stat_show,
6131 	},
6132 	{
6133 		.name = "force_empty",
6134 		.trigger = mem_cgroup_force_empty_write,
6135 	},
6136 	{
6137 		.name = "use_hierarchy",
6138 		.flags = CFTYPE_INSANE,
6139 		.write_u64 = mem_cgroup_hierarchy_write,
6140 		.read_u64 = mem_cgroup_hierarchy_read,
6141 	},
6142 	{
6143 		.name = "cgroup.event_control",		/* XXX: for compat */
6144 		.write_string = memcg_write_event_control,
6145 		.flags = CFTYPE_NO_PREFIX,
6146 		.mode = S_IWUGO,
6147 	},
6148 	{
6149 		.name = "swappiness",
6150 		.read_u64 = mem_cgroup_swappiness_read,
6151 		.write_u64 = mem_cgroup_swappiness_write,
6152 	},
6153 	{
6154 		.name = "move_charge_at_immigrate",
6155 		.read_u64 = mem_cgroup_move_charge_read,
6156 		.write_u64 = mem_cgroup_move_charge_write,
6157 	},
6158 	{
6159 		.name = "oom_control",
6160 		.seq_show = mem_cgroup_oom_control_read,
6161 		.write_u64 = mem_cgroup_oom_control_write,
6162 		.private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL),
6163 	},
6164 	{
6165 		.name = "pressure_level",
6166 	},
6167 #ifdef CONFIG_NUMA
6168 	{
6169 		.name = "numa_stat",
6170 		.seq_show = memcg_numa_stat_show,
6171 	},
6172 #endif
6173 #ifdef CONFIG_MEMCG_KMEM
6174 	{
6175 		.name = "kmem.limit_in_bytes",
6176 		.private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT),
6177 		.write_string = mem_cgroup_write,
6178 		.read_u64 = mem_cgroup_read_u64,
6179 	},
6180 	{
6181 		.name = "kmem.usage_in_bytes",
6182 		.private = MEMFILE_PRIVATE(_KMEM, RES_USAGE),
6183 		.read_u64 = mem_cgroup_read_u64,
6184 	},
6185 	{
6186 		.name = "kmem.failcnt",
6187 		.private = MEMFILE_PRIVATE(_KMEM, RES_FAILCNT),
6188 		.trigger = mem_cgroup_reset,
6189 		.read_u64 = mem_cgroup_read_u64,
6190 	},
6191 	{
6192 		.name = "kmem.max_usage_in_bytes",
6193 		.private = MEMFILE_PRIVATE(_KMEM, RES_MAX_USAGE),
6194 		.trigger = mem_cgroup_reset,
6195 		.read_u64 = mem_cgroup_read_u64,
6196 	},
6197 #ifdef CONFIG_SLABINFO
6198 	{
6199 		.name = "kmem.slabinfo",
6200 		.seq_show = mem_cgroup_slabinfo_read,
6201 	},
6202 #endif
6203 #endif
6204 	{ },	/* terminate */
6205 };
6206 
6207 #ifdef CONFIG_MEMCG_SWAP
6208 static struct cftype memsw_cgroup_files[] = {
6209 	{
6210 		.name = "memsw.usage_in_bytes",
6211 		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
6212 		.read_u64 = mem_cgroup_read_u64,
6213 	},
6214 	{
6215 		.name = "memsw.max_usage_in_bytes",
6216 		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
6217 		.trigger = mem_cgroup_reset,
6218 		.read_u64 = mem_cgroup_read_u64,
6219 	},
6220 	{
6221 		.name = "memsw.limit_in_bytes",
6222 		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
6223 		.write_string = mem_cgroup_write,
6224 		.read_u64 = mem_cgroup_read_u64,
6225 	},
6226 	{
6227 		.name = "memsw.failcnt",
6228 		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
6229 		.trigger = mem_cgroup_reset,
6230 		.read_u64 = mem_cgroup_read_u64,
6231 	},
6232 	{ },	/* terminate */
6233 };
6234 #endif
6235 static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
6236 {
6237 	struct mem_cgroup_per_node *pn;
6238 	struct mem_cgroup_per_zone *mz;
6239 	int zone, tmp = node;
6240 	/*
6241 	 * This routine is called against possible nodes.
6242 	 * But it's BUG to call kmalloc() against offline node.
6243 	 *
6244 	 * TODO: this routine can waste much memory for nodes which will
6245 	 *       never be onlined. It's better to use memory hotplug callback
6246 	 *       function.
6247 	 */
6248 	if (!node_state(node, N_NORMAL_MEMORY))
6249 		tmp = -1;
6250 	pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
6251 	if (!pn)
6252 		return 1;
6253 
6254 	for (zone = 0; zone < MAX_NR_ZONES; zone++) {
6255 		mz = &pn->zoneinfo[zone];
6256 		lruvec_init(&mz->lruvec);
6257 		mz->usage_in_excess = 0;
6258 		mz->on_tree = false;
6259 		mz->memcg = memcg;
6260 	}
6261 	memcg->nodeinfo[node] = pn;
6262 	return 0;
6263 }
6264 
6265 static void free_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
6266 {
6267 	kfree(memcg->nodeinfo[node]);
6268 }
6269 
6270 static struct mem_cgroup *mem_cgroup_alloc(void)
6271 {
6272 	struct mem_cgroup *memcg;
6273 	size_t size;
6274 
6275 	size = sizeof(struct mem_cgroup);
6276 	size += nr_node_ids * sizeof(struct mem_cgroup_per_node *);
6277 
6278 	memcg = kzalloc(size, GFP_KERNEL);
6279 	if (!memcg)
6280 		return NULL;
6281 
6282 	memcg->stat = alloc_percpu(struct mem_cgroup_stat_cpu);
6283 	if (!memcg->stat)
6284 		goto out_free;
6285 	spin_lock_init(&memcg->pcp_counter_lock);
6286 	return memcg;
6287 
6288 out_free:
6289 	kfree(memcg);
6290 	return NULL;
6291 }
6292 
6293 /*
6294  * At destroying mem_cgroup, references from swap_cgroup can remain.
6295  * (scanning all at force_empty is too costly...)
6296  *
6297  * Instead of clearing all references at force_empty, we remember
6298  * the number of reference from swap_cgroup and free mem_cgroup when
6299  * it goes down to 0.
6300  *
6301  * Removal of cgroup itself succeeds regardless of refs from swap.
6302  */
6303 
6304 static void __mem_cgroup_free(struct mem_cgroup *memcg)
6305 {
6306 	int node;
6307 
6308 	mem_cgroup_remove_from_trees(memcg);
6309 
6310 	for_each_node(node)
6311 		free_mem_cgroup_per_zone_info(memcg, node);
6312 
6313 	free_percpu(memcg->stat);
6314 
6315 	/*
6316 	 * We need to make sure that (at least for now), the jump label
6317 	 * destruction code runs outside of the cgroup lock. This is because
6318 	 * get_online_cpus(), which is called from the static_branch update,
6319 	 * can't be called inside the cgroup_lock. cpusets are the ones
6320 	 * enforcing this dependency, so if they ever change, we might as well.
6321 	 *
6322 	 * schedule_work() will guarantee this happens. Be careful if you need
6323 	 * to move this code around, and make sure it is outside
6324 	 * the cgroup_lock.
6325 	 */
6326 	disarm_static_keys(memcg);
6327 	kfree(memcg);
6328 }
6329 
6330 /*
6331  * Returns the parent mem_cgroup in memcgroup hierarchy with hierarchy enabled.
6332  */
6333 struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
6334 {
6335 	if (!memcg->res.parent)
6336 		return NULL;
6337 	return mem_cgroup_from_res_counter(memcg->res.parent, res);
6338 }
6339 EXPORT_SYMBOL(parent_mem_cgroup);
6340 
6341 static void __init mem_cgroup_soft_limit_tree_init(void)
6342 {
6343 	struct mem_cgroup_tree_per_node *rtpn;
6344 	struct mem_cgroup_tree_per_zone *rtpz;
6345 	int tmp, node, zone;
6346 
6347 	for_each_node(node) {
6348 		tmp = node;
6349 		if (!node_state(node, N_NORMAL_MEMORY))
6350 			tmp = -1;
6351 		rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL, tmp);
6352 		BUG_ON(!rtpn);
6353 
6354 		soft_limit_tree.rb_tree_per_node[node] = rtpn;
6355 
6356 		for (zone = 0; zone < MAX_NR_ZONES; zone++) {
6357 			rtpz = &rtpn->rb_tree_per_zone[zone];
6358 			rtpz->rb_root = RB_ROOT;
6359 			spin_lock_init(&rtpz->lock);
6360 		}
6361 	}
6362 }
6363 
6364 static struct cgroup_subsys_state * __ref
6365 mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
6366 {
6367 	struct mem_cgroup *memcg;
6368 	long error = -ENOMEM;
6369 	int node;
6370 
6371 	memcg = mem_cgroup_alloc();
6372 	if (!memcg)
6373 		return ERR_PTR(error);
6374 
6375 	for_each_node(node)
6376 		if (alloc_mem_cgroup_per_zone_info(memcg, node))
6377 			goto free_out;
6378 
6379 	/* root ? */
6380 	if (parent_css == NULL) {
6381 		root_mem_cgroup = memcg;
6382 		res_counter_init(&memcg->res, NULL);
6383 		res_counter_init(&memcg->memsw, NULL);
6384 		res_counter_init(&memcg->kmem, NULL);
6385 	}
6386 
6387 	memcg->last_scanned_node = MAX_NUMNODES;
6388 	INIT_LIST_HEAD(&memcg->oom_notify);
6389 	memcg->move_charge_at_immigrate = 0;
6390 	mutex_init(&memcg->thresholds_lock);
6391 	spin_lock_init(&memcg->move_lock);
6392 	vmpressure_init(&memcg->vmpressure);
6393 	INIT_LIST_HEAD(&memcg->event_list);
6394 	spin_lock_init(&memcg->event_list_lock);
6395 
6396 	return &memcg->css;
6397 
6398 free_out:
6399 	__mem_cgroup_free(memcg);
6400 	return ERR_PTR(error);
6401 }
6402 
6403 static int
6404 mem_cgroup_css_online(struct cgroup_subsys_state *css)
6405 {
6406 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
6407 	struct mem_cgroup *parent = mem_cgroup_from_css(css_parent(css));
6408 
6409 	if (css->cgroup->id > MEM_CGROUP_ID_MAX)
6410 		return -ENOSPC;
6411 
6412 	if (!parent)
6413 		return 0;
6414 
6415 	mutex_lock(&memcg_create_mutex);
6416 
6417 	memcg->use_hierarchy = parent->use_hierarchy;
6418 	memcg->oom_kill_disable = parent->oom_kill_disable;
6419 	memcg->swappiness = mem_cgroup_swappiness(parent);
6420 
6421 	if (parent->use_hierarchy) {
6422 		res_counter_init(&memcg->res, &parent->res);
6423 		res_counter_init(&memcg->memsw, &parent->memsw);
6424 		res_counter_init(&memcg->kmem, &parent->kmem);
6425 
6426 		/*
6427 		 * No need to take a reference to the parent because cgroup
6428 		 * core guarantees its existence.
6429 		 */
6430 	} else {
6431 		res_counter_init(&memcg->res, NULL);
6432 		res_counter_init(&memcg->memsw, NULL);
6433 		res_counter_init(&memcg->kmem, NULL);
6434 		/*
6435 		 * Deeper hierachy with use_hierarchy == false doesn't make
6436 		 * much sense so let cgroup subsystem know about this
6437 		 * unfortunate state in our controller.
6438 		 */
6439 		if (parent != root_mem_cgroup)
6440 			memory_cgrp_subsys.broken_hierarchy = true;
6441 	}
6442 	mutex_unlock(&memcg_create_mutex);
6443 
6444 	return memcg_init_kmem(memcg, &memory_cgrp_subsys);
6445 }
6446 
6447 /*
6448  * Announce all parents that a group from their hierarchy is gone.
6449  */
6450 static void mem_cgroup_invalidate_reclaim_iterators(struct mem_cgroup *memcg)
6451 {
6452 	struct mem_cgroup *parent = memcg;
6453 
6454 	while ((parent = parent_mem_cgroup(parent)))
6455 		mem_cgroup_iter_invalidate(parent);
6456 
6457 	/*
6458 	 * if the root memcg is not hierarchical we have to check it
6459 	 * explicitely.
6460 	 */
6461 	if (!root_mem_cgroup->use_hierarchy)
6462 		mem_cgroup_iter_invalidate(root_mem_cgroup);
6463 }
6464 
6465 static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
6466 {
6467 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
6468 	struct mem_cgroup_event *event, *tmp;
6469 	struct cgroup_subsys_state *iter;
6470 
6471 	/*
6472 	 * Unregister events and notify userspace.
6473 	 * Notify userspace about cgroup removing only after rmdir of cgroup
6474 	 * directory to avoid race between userspace and kernelspace.
6475 	 */
6476 	spin_lock(&memcg->event_list_lock);
6477 	list_for_each_entry_safe(event, tmp, &memcg->event_list, list) {
6478 		list_del_init(&event->list);
6479 		schedule_work(&event->remove);
6480 	}
6481 	spin_unlock(&memcg->event_list_lock);
6482 
6483 	kmem_cgroup_css_offline(memcg);
6484 
6485 	mem_cgroup_invalidate_reclaim_iterators(memcg);
6486 
6487 	/*
6488 	 * This requires that offlining is serialized.  Right now that is
6489 	 * guaranteed because css_killed_work_fn() holds the cgroup_mutex.
6490 	 */
6491 	css_for_each_descendant_post(iter, css)
6492 		mem_cgroup_reparent_charges(mem_cgroup_from_css(iter));
6493 
6494 	mem_cgroup_destroy_all_caches(memcg);
6495 	vmpressure_cleanup(&memcg->vmpressure);
6496 }
6497 
6498 static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
6499 {
6500 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
6501 	/*
6502 	 * XXX: css_offline() would be where we should reparent all
6503 	 * memory to prepare the cgroup for destruction.  However,
6504 	 * memcg does not do css_tryget() and res_counter charging
6505 	 * under the same RCU lock region, which means that charging
6506 	 * could race with offlining.  Offlining only happens to
6507 	 * cgroups with no tasks in them but charges can show up
6508 	 * without any tasks from the swapin path when the target
6509 	 * memcg is looked up from the swapout record and not from the
6510 	 * current task as it usually is.  A race like this can leak
6511 	 * charges and put pages with stale cgroup pointers into
6512 	 * circulation:
6513 	 *
6514 	 * #0                        #1
6515 	 *                           lookup_swap_cgroup_id()
6516 	 *                           rcu_read_lock()
6517 	 *                           mem_cgroup_lookup()
6518 	 *                           css_tryget()
6519 	 *                           rcu_read_unlock()
6520 	 * disable css_tryget()
6521 	 * call_rcu()
6522 	 *   offline_css()
6523 	 *     reparent_charges()
6524 	 *                           res_counter_charge()
6525 	 *                           css_put()
6526 	 *                             css_free()
6527 	 *                           pc->mem_cgroup = dead memcg
6528 	 *                           add page to lru
6529 	 *
6530 	 * The bulk of the charges are still moved in offline_css() to
6531 	 * avoid pinning a lot of pages in case a long-term reference
6532 	 * like a swapout record is deferring the css_free() to long
6533 	 * after offlining.  But this makes sure we catch any charges
6534 	 * made after offlining:
6535 	 */
6536 	mem_cgroup_reparent_charges(memcg);
6537 
6538 	memcg_destroy_kmem(memcg);
6539 	__mem_cgroup_free(memcg);
6540 }
6541 
6542 #ifdef CONFIG_MMU
6543 /* Handlers for move charge at task migration. */
6544 #define PRECHARGE_COUNT_AT_ONCE	256
6545 static int mem_cgroup_do_precharge(unsigned long count)
6546 {
6547 	int ret = 0;
6548 	int batch_count = PRECHARGE_COUNT_AT_ONCE;
6549 	struct mem_cgroup *memcg = mc.to;
6550 
6551 	if (mem_cgroup_is_root(memcg)) {
6552 		mc.precharge += count;
6553 		/* we don't need css_get for root */
6554 		return ret;
6555 	}
6556 	/* try to charge at once */
6557 	if (count > 1) {
6558 		struct res_counter *dummy;
6559 		/*
6560 		 * "memcg" cannot be under rmdir() because we've already checked
6561 		 * by cgroup_lock_live_cgroup() that it is not removed and we
6562 		 * are still under the same cgroup_mutex. So we can postpone
6563 		 * css_get().
6564 		 */
6565 		if (res_counter_charge(&memcg->res, PAGE_SIZE * count, &dummy))
6566 			goto one_by_one;
6567 		if (do_swap_account && res_counter_charge(&memcg->memsw,
6568 						PAGE_SIZE * count, &dummy)) {
6569 			res_counter_uncharge(&memcg->res, PAGE_SIZE * count);
6570 			goto one_by_one;
6571 		}
6572 		mc.precharge += count;
6573 		return ret;
6574 	}
6575 one_by_one:
6576 	/* fall back to one by one charge */
6577 	while (count--) {
6578 		if (signal_pending(current)) {
6579 			ret = -EINTR;
6580 			break;
6581 		}
6582 		if (!batch_count--) {
6583 			batch_count = PRECHARGE_COUNT_AT_ONCE;
6584 			cond_resched();
6585 		}
6586 		ret = mem_cgroup_try_charge(memcg, GFP_KERNEL, 1, false);
6587 		if (ret)
6588 			/* mem_cgroup_clear_mc() will do uncharge later */
6589 			return ret;
6590 		mc.precharge++;
6591 	}
6592 	return ret;
6593 }
6594 
6595 /**
6596  * get_mctgt_type - get target type of moving charge
6597  * @vma: the vma the pte to be checked belongs
6598  * @addr: the address corresponding to the pte to be checked
6599  * @ptent: the pte to be checked
6600  * @target: the pointer the target page or swap ent will be stored(can be NULL)
6601  *
6602  * Returns
6603  *   0(MC_TARGET_NONE): if the pte is not a target for move charge.
6604  *   1(MC_TARGET_PAGE): if the page corresponding to this pte is a target for
6605  *     move charge. if @target is not NULL, the page is stored in target->page
6606  *     with extra refcnt got(Callers should handle it).
6607  *   2(MC_TARGET_SWAP): if the swap entry corresponding to this pte is a
6608  *     target for charge migration. if @target is not NULL, the entry is stored
6609  *     in target->ent.
6610  *
6611  * Called with pte lock held.
6612  */
6613 union mc_target {
6614 	struct page	*page;
6615 	swp_entry_t	ent;
6616 };
6617 
6618 enum mc_target_type {
6619 	MC_TARGET_NONE = 0,
6620 	MC_TARGET_PAGE,
6621 	MC_TARGET_SWAP,
6622 };
6623 
6624 static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
6625 						unsigned long addr, pte_t ptent)
6626 {
6627 	struct page *page = vm_normal_page(vma, addr, ptent);
6628 
6629 	if (!page || !page_mapped(page))
6630 		return NULL;
6631 	if (PageAnon(page)) {
6632 		/* we don't move shared anon */
6633 		if (!move_anon())
6634 			return NULL;
6635 	} else if (!move_file())
6636 		/* we ignore mapcount for file pages */
6637 		return NULL;
6638 	if (!get_page_unless_zero(page))
6639 		return NULL;
6640 
6641 	return page;
6642 }
6643 
6644 #ifdef CONFIG_SWAP
6645 static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
6646 			unsigned long addr, pte_t ptent, swp_entry_t *entry)
6647 {
6648 	struct page *page = NULL;
6649 	swp_entry_t ent = pte_to_swp_entry(ptent);
6650 
6651 	if (!move_anon() || non_swap_entry(ent))
6652 		return NULL;
6653 	/*
6654 	 * Because lookup_swap_cache() updates some statistics counter,
6655 	 * we call find_get_page() with swapper_space directly.
6656 	 */
6657 	page = find_get_page(swap_address_space(ent), ent.val);
6658 	if (do_swap_account)
6659 		entry->val = ent.val;
6660 
6661 	return page;
6662 }
6663 #else
6664 static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
6665 			unsigned long addr, pte_t ptent, swp_entry_t *entry)
6666 {
6667 	return NULL;
6668 }
6669 #endif
6670 
6671 static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
6672 			unsigned long addr, pte_t ptent, swp_entry_t *entry)
6673 {
6674 	struct page *page = NULL;
6675 	struct address_space *mapping;
6676 	pgoff_t pgoff;
6677 
6678 	if (!vma->vm_file) /* anonymous vma */
6679 		return NULL;
6680 	if (!move_file())
6681 		return NULL;
6682 
6683 	mapping = vma->vm_file->f_mapping;
6684 	if (pte_none(ptent))
6685 		pgoff = linear_page_index(vma, addr);
6686 	else /* pte_file(ptent) is true */
6687 		pgoff = pte_to_pgoff(ptent);
6688 
6689 	/* page is moved even if it's not RSS of this task(page-faulted). */
6690 #ifdef CONFIG_SWAP
6691 	/* shmem/tmpfs may report page out on swap: account for that too. */
6692 	if (shmem_mapping(mapping)) {
6693 		page = find_get_entry(mapping, pgoff);
6694 		if (radix_tree_exceptional_entry(page)) {
6695 			swp_entry_t swp = radix_to_swp_entry(page);
6696 			if (do_swap_account)
6697 				*entry = swp;
6698 			page = find_get_page(swap_address_space(swp), swp.val);
6699 		}
6700 	} else
6701 		page = find_get_page(mapping, pgoff);
6702 #else
6703 	page = find_get_page(mapping, pgoff);
6704 #endif
6705 	return page;
6706 }
6707 
6708 static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma,
6709 		unsigned long addr, pte_t ptent, union mc_target *target)
6710 {
6711 	struct page *page = NULL;
6712 	struct page_cgroup *pc;
6713 	enum mc_target_type ret = MC_TARGET_NONE;
6714 	swp_entry_t ent = { .val = 0 };
6715 
6716 	if (pte_present(ptent))
6717 		page = mc_handle_present_pte(vma, addr, ptent);
6718 	else if (is_swap_pte(ptent))
6719 		page = mc_handle_swap_pte(vma, addr, ptent, &ent);
6720 	else if (pte_none(ptent) || pte_file(ptent))
6721 		page = mc_handle_file_pte(vma, addr, ptent, &ent);
6722 
6723 	if (!page && !ent.val)
6724 		return ret;
6725 	if (page) {
6726 		pc = lookup_page_cgroup(page);
6727 		/*
6728 		 * Do only loose check w/o page_cgroup lock.
6729 		 * mem_cgroup_move_account() checks the pc is valid or not under
6730 		 * the lock.
6731 		 */
6732 		if (PageCgroupUsed(pc) && pc->mem_cgroup == mc.from) {
6733 			ret = MC_TARGET_PAGE;
6734 			if (target)
6735 				target->page = page;
6736 		}
6737 		if (!ret || !target)
6738 			put_page(page);
6739 	}
6740 	/* There is a swap entry and a page doesn't exist or isn't charged */
6741 	if (ent.val && !ret &&
6742 	    mem_cgroup_id(mc.from) == lookup_swap_cgroup_id(ent)) {
6743 		ret = MC_TARGET_SWAP;
6744 		if (target)
6745 			target->ent = ent;
6746 	}
6747 	return ret;
6748 }
6749 
6750 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
6751 /*
6752  * We don't consider swapping or file mapped pages because THP does not
6753  * support them for now.
6754  * Caller should make sure that pmd_trans_huge(pmd) is true.
6755  */
6756 static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
6757 		unsigned long addr, pmd_t pmd, union mc_target *target)
6758 {
6759 	struct page *page = NULL;
6760 	struct page_cgroup *pc;
6761 	enum mc_target_type ret = MC_TARGET_NONE;
6762 
6763 	page = pmd_page(pmd);
6764 	VM_BUG_ON_PAGE(!page || !PageHead(page), page);
6765 	if (!move_anon())
6766 		return ret;
6767 	pc = lookup_page_cgroup(page);
6768 	if (PageCgroupUsed(pc) && pc->mem_cgroup == mc.from) {
6769 		ret = MC_TARGET_PAGE;
6770 		if (target) {
6771 			get_page(page);
6772 			target->page = page;
6773 		}
6774 	}
6775 	return ret;
6776 }
6777 #else
6778 static inline enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
6779 		unsigned long addr, pmd_t pmd, union mc_target *target)
6780 {
6781 	return MC_TARGET_NONE;
6782 }
6783 #endif
6784 
6785 static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
6786 					unsigned long addr, unsigned long end,
6787 					struct mm_walk *walk)
6788 {
6789 	struct vm_area_struct *vma = walk->private;
6790 	pte_t *pte;
6791 	spinlock_t *ptl;
6792 
6793 	if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
6794 		if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE)
6795 			mc.precharge += HPAGE_PMD_NR;
6796 		spin_unlock(ptl);
6797 		return 0;
6798 	}
6799 
6800 	if (pmd_trans_unstable(pmd))
6801 		return 0;
6802 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
6803 	for (; addr != end; pte++, addr += PAGE_SIZE)
6804 		if (get_mctgt_type(vma, addr, *pte, NULL))
6805 			mc.precharge++;	/* increment precharge temporarily */
6806 	pte_unmap_unlock(pte - 1, ptl);
6807 	cond_resched();
6808 
6809 	return 0;
6810 }
6811 
6812 static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
6813 {
6814 	unsigned long precharge;
6815 	struct vm_area_struct *vma;
6816 
6817 	down_read(&mm->mmap_sem);
6818 	for (vma = mm->mmap; vma; vma = vma->vm_next) {
6819 		struct mm_walk mem_cgroup_count_precharge_walk = {
6820 			.pmd_entry = mem_cgroup_count_precharge_pte_range,
6821 			.mm = mm,
6822 			.private = vma,
6823 		};
6824 		if (is_vm_hugetlb_page(vma))
6825 			continue;
6826 		walk_page_range(vma->vm_start, vma->vm_end,
6827 					&mem_cgroup_count_precharge_walk);
6828 	}
6829 	up_read(&mm->mmap_sem);
6830 
6831 	precharge = mc.precharge;
6832 	mc.precharge = 0;
6833 
6834 	return precharge;
6835 }
6836 
6837 static int mem_cgroup_precharge_mc(struct mm_struct *mm)
6838 {
6839 	unsigned long precharge = mem_cgroup_count_precharge(mm);
6840 
6841 	VM_BUG_ON(mc.moving_task);
6842 	mc.moving_task = current;
6843 	return mem_cgroup_do_precharge(precharge);
6844 }
6845 
6846 /* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */
6847 static void __mem_cgroup_clear_mc(void)
6848 {
6849 	struct mem_cgroup *from = mc.from;
6850 	struct mem_cgroup *to = mc.to;
6851 	int i;
6852 
6853 	/* we must uncharge all the leftover precharges from mc.to */
6854 	if (mc.precharge) {
6855 		__mem_cgroup_cancel_charge(mc.to, mc.precharge);
6856 		mc.precharge = 0;
6857 	}
6858 	/*
6859 	 * we didn't uncharge from mc.from at mem_cgroup_move_account(), so
6860 	 * we must uncharge here.
6861 	 */
6862 	if (mc.moved_charge) {
6863 		__mem_cgroup_cancel_charge(mc.from, mc.moved_charge);
6864 		mc.moved_charge = 0;
6865 	}
6866 	/* we must fixup refcnts and charges */
6867 	if (mc.moved_swap) {
6868 		/* uncharge swap account from the old cgroup */
6869 		if (!mem_cgroup_is_root(mc.from))
6870 			res_counter_uncharge(&mc.from->memsw,
6871 						PAGE_SIZE * mc.moved_swap);
6872 
6873 		for (i = 0; i < mc.moved_swap; i++)
6874 			css_put(&mc.from->css);
6875 
6876 		if (!mem_cgroup_is_root(mc.to)) {
6877 			/*
6878 			 * we charged both to->res and to->memsw, so we should
6879 			 * uncharge to->res.
6880 			 */
6881 			res_counter_uncharge(&mc.to->res,
6882 						PAGE_SIZE * mc.moved_swap);
6883 		}
6884 		/* we've already done css_get(mc.to) */
6885 		mc.moved_swap = 0;
6886 	}
6887 	memcg_oom_recover(from);
6888 	memcg_oom_recover(to);
6889 	wake_up_all(&mc.waitq);
6890 }
6891 
6892 static void mem_cgroup_clear_mc(void)
6893 {
6894 	struct mem_cgroup *from = mc.from;
6895 
6896 	/*
6897 	 * we must clear moving_task before waking up waiters at the end of
6898 	 * task migration.
6899 	 */
6900 	mc.moving_task = NULL;
6901 	__mem_cgroup_clear_mc();
6902 	spin_lock(&mc.lock);
6903 	mc.from = NULL;
6904 	mc.to = NULL;
6905 	spin_unlock(&mc.lock);
6906 	mem_cgroup_end_move(from);
6907 }
6908 
6909 static int mem_cgroup_can_attach(struct cgroup_subsys_state *css,
6910 				 struct cgroup_taskset *tset)
6911 {
6912 	struct task_struct *p = cgroup_taskset_first(tset);
6913 	int ret = 0;
6914 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
6915 	unsigned long move_charge_at_immigrate;
6916 
6917 	/*
6918 	 * We are now commited to this value whatever it is. Changes in this
6919 	 * tunable will only affect upcoming migrations, not the current one.
6920 	 * So we need to save it, and keep it going.
6921 	 */
6922 	move_charge_at_immigrate  = memcg->move_charge_at_immigrate;
6923 	if (move_charge_at_immigrate) {
6924 		struct mm_struct *mm;
6925 		struct mem_cgroup *from = mem_cgroup_from_task(p);
6926 
6927 		VM_BUG_ON(from == memcg);
6928 
6929 		mm = get_task_mm(p);
6930 		if (!mm)
6931 			return 0;
6932 		/* We move charges only when we move a owner of the mm */
6933 		if (mm->owner == p) {
6934 			VM_BUG_ON(mc.from);
6935 			VM_BUG_ON(mc.to);
6936 			VM_BUG_ON(mc.precharge);
6937 			VM_BUG_ON(mc.moved_charge);
6938 			VM_BUG_ON(mc.moved_swap);
6939 			mem_cgroup_start_move(from);
6940 			spin_lock(&mc.lock);
6941 			mc.from = from;
6942 			mc.to = memcg;
6943 			mc.immigrate_flags = move_charge_at_immigrate;
6944 			spin_unlock(&mc.lock);
6945 			/* We set mc.moving_task later */
6946 
6947 			ret = mem_cgroup_precharge_mc(mm);
6948 			if (ret)
6949 				mem_cgroup_clear_mc();
6950 		}
6951 		mmput(mm);
6952 	}
6953 	return ret;
6954 }
6955 
6956 static void mem_cgroup_cancel_attach(struct cgroup_subsys_state *css,
6957 				     struct cgroup_taskset *tset)
6958 {
6959 	mem_cgroup_clear_mc();
6960 }
6961 
6962 static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
6963 				unsigned long addr, unsigned long end,
6964 				struct mm_walk *walk)
6965 {
6966 	int ret = 0;
6967 	struct vm_area_struct *vma = walk->private;
6968 	pte_t *pte;
6969 	spinlock_t *ptl;
6970 	enum mc_target_type target_type;
6971 	union mc_target target;
6972 	struct page *page;
6973 	struct page_cgroup *pc;
6974 
6975 	/*
6976 	 * We don't take compound_lock() here but no race with splitting thp
6977 	 * happens because:
6978 	 *  - if pmd_trans_huge_lock() returns 1, the relevant thp is not
6979 	 *    under splitting, which means there's no concurrent thp split,
6980 	 *  - if another thread runs into split_huge_page() just after we
6981 	 *    entered this if-block, the thread must wait for page table lock
6982 	 *    to be unlocked in __split_huge_page_splitting(), where the main
6983 	 *    part of thp split is not executed yet.
6984 	 */
6985 	if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
6986 		if (mc.precharge < HPAGE_PMD_NR) {
6987 			spin_unlock(ptl);
6988 			return 0;
6989 		}
6990 		target_type = get_mctgt_type_thp(vma, addr, *pmd, &target);
6991 		if (target_type == MC_TARGET_PAGE) {
6992 			page = target.page;
6993 			if (!isolate_lru_page(page)) {
6994 				pc = lookup_page_cgroup(page);
6995 				if (!mem_cgroup_move_account(page, HPAGE_PMD_NR,
6996 							pc, mc.from, mc.to)) {
6997 					mc.precharge -= HPAGE_PMD_NR;
6998 					mc.moved_charge += HPAGE_PMD_NR;
6999 				}
7000 				putback_lru_page(page);
7001 			}
7002 			put_page(page);
7003 		}
7004 		spin_unlock(ptl);
7005 		return 0;
7006 	}
7007 
7008 	if (pmd_trans_unstable(pmd))
7009 		return 0;
7010 retry:
7011 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
7012 	for (; addr != end; addr += PAGE_SIZE) {
7013 		pte_t ptent = *(pte++);
7014 		swp_entry_t ent;
7015 
7016 		if (!mc.precharge)
7017 			break;
7018 
7019 		switch (get_mctgt_type(vma, addr, ptent, &target)) {
7020 		case MC_TARGET_PAGE:
7021 			page = target.page;
7022 			if (isolate_lru_page(page))
7023 				goto put;
7024 			pc = lookup_page_cgroup(page);
7025 			if (!mem_cgroup_move_account(page, 1, pc,
7026 						     mc.from, mc.to)) {
7027 				mc.precharge--;
7028 				/* we uncharge from mc.from later. */
7029 				mc.moved_charge++;
7030 			}
7031 			putback_lru_page(page);
7032 put:			/* get_mctgt_type() gets the page */
7033 			put_page(page);
7034 			break;
7035 		case MC_TARGET_SWAP:
7036 			ent = target.ent;
7037 			if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) {
7038 				mc.precharge--;
7039 				/* we fixup refcnts and charges later. */
7040 				mc.moved_swap++;
7041 			}
7042 			break;
7043 		default:
7044 			break;
7045 		}
7046 	}
7047 	pte_unmap_unlock(pte - 1, ptl);
7048 	cond_resched();
7049 
7050 	if (addr != end) {
7051 		/*
7052 		 * We have consumed all precharges we got in can_attach().
7053 		 * We try charge one by one, but don't do any additional
7054 		 * charges to mc.to if we have failed in charge once in attach()
7055 		 * phase.
7056 		 */
7057 		ret = mem_cgroup_do_precharge(1);
7058 		if (!ret)
7059 			goto retry;
7060 	}
7061 
7062 	return ret;
7063 }
7064 
7065 static void mem_cgroup_move_charge(struct mm_struct *mm)
7066 {
7067 	struct vm_area_struct *vma;
7068 
7069 	lru_add_drain_all();
7070 retry:
7071 	if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
7072 		/*
7073 		 * Someone who are holding the mmap_sem might be waiting in
7074 		 * waitq. So we cancel all extra charges, wake up all waiters,
7075 		 * and retry. Because we cancel precharges, we might not be able
7076 		 * to move enough charges, but moving charge is a best-effort
7077 		 * feature anyway, so it wouldn't be a big problem.
7078 		 */
7079 		__mem_cgroup_clear_mc();
7080 		cond_resched();
7081 		goto retry;
7082 	}
7083 	for (vma = mm->mmap; vma; vma = vma->vm_next) {
7084 		int ret;
7085 		struct mm_walk mem_cgroup_move_charge_walk = {
7086 			.pmd_entry = mem_cgroup_move_charge_pte_range,
7087 			.mm = mm,
7088 			.private = vma,
7089 		};
7090 		if (is_vm_hugetlb_page(vma))
7091 			continue;
7092 		ret = walk_page_range(vma->vm_start, vma->vm_end,
7093 						&mem_cgroup_move_charge_walk);
7094 		if (ret)
7095 			/*
7096 			 * means we have consumed all precharges and failed in
7097 			 * doing additional charge. Just abandon here.
7098 			 */
7099 			break;
7100 	}
7101 	up_read(&mm->mmap_sem);
7102 }
7103 
7104 static void mem_cgroup_move_task(struct cgroup_subsys_state *css,
7105 				 struct cgroup_taskset *tset)
7106 {
7107 	struct task_struct *p = cgroup_taskset_first(tset);
7108 	struct mm_struct *mm = get_task_mm(p);
7109 
7110 	if (mm) {
7111 		if (mc.to)
7112 			mem_cgroup_move_charge(mm);
7113 		mmput(mm);
7114 	}
7115 	if (mc.to)
7116 		mem_cgroup_clear_mc();
7117 }
7118 #else	/* !CONFIG_MMU */
7119 static int mem_cgroup_can_attach(struct cgroup_subsys_state *css,
7120 				 struct cgroup_taskset *tset)
7121 {
7122 	return 0;
7123 }
7124 static void mem_cgroup_cancel_attach(struct cgroup_subsys_state *css,
7125 				     struct cgroup_taskset *tset)
7126 {
7127 }
7128 static void mem_cgroup_move_task(struct cgroup_subsys_state *css,
7129 				 struct cgroup_taskset *tset)
7130 {
7131 }
7132 #endif
7133 
7134 /*
7135  * Cgroup retains root cgroups across [un]mount cycles making it necessary
7136  * to verify sane_behavior flag on each mount attempt.
7137  */
7138 static void mem_cgroup_bind(struct cgroup_subsys_state *root_css)
7139 {
7140 	/*
7141 	 * use_hierarchy is forced with sane_behavior.  cgroup core
7142 	 * guarantees that @root doesn't have any children, so turning it
7143 	 * on for the root memcg is enough.
7144 	 */
7145 	if (cgroup_sane_behavior(root_css->cgroup))
7146 		mem_cgroup_from_css(root_css)->use_hierarchy = true;
7147 }
7148 
7149 struct cgroup_subsys memory_cgrp_subsys = {
7150 	.css_alloc = mem_cgroup_css_alloc,
7151 	.css_online = mem_cgroup_css_online,
7152 	.css_offline = mem_cgroup_css_offline,
7153 	.css_free = mem_cgroup_css_free,
7154 	.can_attach = mem_cgroup_can_attach,
7155 	.cancel_attach = mem_cgroup_cancel_attach,
7156 	.attach = mem_cgroup_move_task,
7157 	.bind = mem_cgroup_bind,
7158 	.base_cftypes = mem_cgroup_files,
7159 	.early_init = 0,
7160 };
7161 
7162 #ifdef CONFIG_MEMCG_SWAP
7163 static int __init enable_swap_account(char *s)
7164 {
7165 	if (!strcmp(s, "1"))
7166 		really_do_swap_account = 1;
7167 	else if (!strcmp(s, "0"))
7168 		really_do_swap_account = 0;
7169 	return 1;
7170 }
7171 __setup("swapaccount=", enable_swap_account);
7172 
7173 static void __init memsw_file_init(void)
7174 {
7175 	WARN_ON(cgroup_add_cftypes(&memory_cgrp_subsys, memsw_cgroup_files));
7176 }
7177 
7178 static void __init enable_swap_cgroup(void)
7179 {
7180 	if (!mem_cgroup_disabled() && really_do_swap_account) {
7181 		do_swap_account = 1;
7182 		memsw_file_init();
7183 	}
7184 }
7185 
7186 #else
7187 static void __init enable_swap_cgroup(void)
7188 {
7189 }
7190 #endif
7191 
7192 /*
7193  * subsys_initcall() for memory controller.
7194  *
7195  * Some parts like hotcpu_notifier() have to be initialized from this context
7196  * because of lock dependencies (cgroup_lock -> cpu hotplug) but basically
7197  * everything that doesn't depend on a specific mem_cgroup structure should
7198  * be initialized from here.
7199  */
7200 static int __init mem_cgroup_init(void)
7201 {
7202 	hotcpu_notifier(memcg_cpu_hotplug_callback, 0);
7203 	enable_swap_cgroup();
7204 	mem_cgroup_soft_limit_tree_init();
7205 	memcg_stock_init();
7206 	return 0;
7207 }
7208 subsys_initcall(mem_cgroup_init);
7209