xref: /linux/include/linux/memcontrol.h (revision 971370a88c3b1be1144c11468b4c84e3ed17af6d)
1 /* SPDX-License-Identifier: GPL-2.0-or-later */
2 /* memcontrol.h - Memory Controller
3  *
4  * Copyright IBM Corporation, 2007
5  * Author Balbir Singh <balbir@linux.vnet.ibm.com>
6  *
7  * Copyright 2007 OpenVZ SWsoft Inc
8  * Author: Pavel Emelianov <xemul@openvz.org>
9  */
10 
11 #ifndef _LINUX_MEMCONTROL_H
12 #define _LINUX_MEMCONTROL_H
13 #include <linux/cgroup.h>
14 #include <linux/vm_event_item.h>
15 #include <linux/hardirq.h>
16 #include <linux/jump_label.h>
17 #include <linux/kernel.h>
18 #include <linux/page_counter.h>
19 #include <linux/vmpressure.h>
20 #include <linux/eventfd.h>
21 #include <linux/mm.h>
22 #include <linux/vmstat.h>
23 #include <linux/writeback.h>
24 #include <linux/page-flags.h>
25 #include <linux/shrinker.h>
26 
27 struct mem_cgroup;
28 struct obj_cgroup;
29 struct page;
30 struct mm_struct;
31 struct kmem_cache;
32 
33 /* Cgroup-specific page state, on top of universal node page state */
34 enum memcg_stat_item {
35 	MEMCG_SWAP = NR_VM_NODE_STAT_ITEMS,
36 	MEMCG_SOCK,
37 	MEMCG_PERCPU_B,
38 	MEMCG_VMALLOC,
39 	MEMCG_KMEM,
40 	MEMCG_ZSWAP_B,
41 	MEMCG_ZSWAPPED,
42 	MEMCG_NR_STAT,
43 };
44 
45 enum memcg_memory_event {
46 	MEMCG_LOW,
47 	MEMCG_HIGH,
48 	MEMCG_MAX,
49 	MEMCG_OOM,
50 	MEMCG_OOM_KILL,
51 	MEMCG_OOM_GROUP_KILL,
52 	MEMCG_SWAP_HIGH,
53 	MEMCG_SWAP_MAX,
54 	MEMCG_SWAP_FAIL,
55 	MEMCG_NR_MEMORY_EVENTS,
56 };
57 
58 struct mem_cgroup_reclaim_cookie {
59 	pg_data_t *pgdat;
60 	int generation;
61 };
62 
63 #ifdef CONFIG_MEMCG
64 
65 #define MEM_CGROUP_ID_SHIFT	16
66 
67 struct mem_cgroup_id {
68 	int id;
69 	refcount_t ref;
70 };
71 
72 struct memcg_vmstats_percpu;
73 struct memcg1_events_percpu;
74 struct memcg_vmstats;
75 struct lruvec_stats_percpu;
76 struct lruvec_stats;
77 
78 struct mem_cgroup_reclaim_iter {
79 	struct mem_cgroup *position;
80 	/* scan generation, increased every round-trip */
81 	atomic_t generation;
82 };
83 
84 /*
85  * per-node information in memory controller.
86  */
87 struct mem_cgroup_per_node {
88 	/* Keep the read-only fields at the start */
89 	struct mem_cgroup	*memcg;		/* Back pointer, we cannot */
90 						/* use container_of	   */
91 
92 	struct lruvec_stats_percpu __percpu	*lruvec_stats_percpu;
93 	struct lruvec_stats			*lruvec_stats;
94 	struct shrinker_info __rcu	*shrinker_info;
95 
96 #ifdef CONFIG_MEMCG_V1
97 	/*
98 	 * Memcg-v1 only fields sit in the middle as a buffer between the
99 	 * read-mostly fields above and the often-updated fields below, to
100 	 * avoid false sharing. Without v1, explicit padding is needed instead.
101 	 */
102 
103 	struct rb_node		tree_node;	/* RB tree node */
104 	unsigned long		usage_in_excess;/* Set to the value by which */
105 						/* the soft limit is exceeded*/
106 	bool			on_tree;
107 #else
108 	CACHELINE_PADDING(_pad1_);
109 #endif
110 
111 	/* Fields which get updated often at the end. */
112 	struct lruvec		lruvec;
113 	CACHELINE_PADDING(_pad2_);
114 	unsigned long		lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS];
115 	struct mem_cgroup_reclaim_iter	iter;
116 
117 #ifdef CONFIG_MEMCG_NMI_SAFETY_REQUIRES_ATOMIC
118 	/* slab stats for nmi context */
119 	atomic_t		slab_reclaimable;
120 	atomic_t		slab_unreclaimable;
121 #endif
122 };
123 
124 struct mem_cgroup_threshold {
125 	struct eventfd_ctx *eventfd;
126 	unsigned long threshold;
127 };
128 
129 /* For threshold */
130 struct mem_cgroup_threshold_ary {
131 	/* An array index points to threshold just below or equal to usage. */
132 	int current_threshold;
133 	/* Size of entries[] */
134 	unsigned int size;
135 	/* Array of thresholds */
136 	struct mem_cgroup_threshold entries[] __counted_by(size);
137 };
138 
139 struct mem_cgroup_thresholds {
140 	/* Primary thresholds array */
141 	struct mem_cgroup_threshold_ary *primary;
142 	/*
143 	 * Spare threshold array.
144 	 * This is needed to make mem_cgroup_unregister_event() "never fail".
145 	 * It must be able to store at least primary->size - 1 entries.
146 	 */
147 	struct mem_cgroup_threshold_ary *spare;
148 };
149 
150 /*
151  * Remember four most recent foreign writebacks with dirty pages in this
152  * cgroup.  Inode sharing is expected to be uncommon and, even if we miss
153  * one in a given round, we're likely to catch it later if it keeps
154  * foreign-dirtying, so a fairly low count should be enough.
155  *
156  * See mem_cgroup_track_foreign_dirty_slowpath() for details.
157  */
158 #define MEMCG_CGWB_FRN_CNT	4
159 
160 struct memcg_cgwb_frn {
161 	u64 bdi_id;			/* bdi->id of the foreign inode */
162 	int memcg_id;			/* memcg->css.id of foreign inode */
163 	u64 at;				/* jiffies_64 at the time of dirtying */
164 	struct wb_completion done;	/* tracks in-flight foreign writebacks */
165 };
166 
167 /*
168  * Bucket for arbitrarily byte-sized objects charged to a memory
169  * cgroup. The bucket can be reparented in one piece when the cgroup
170  * is destroyed, without having to round up the individual references
171  * of all live memory objects in the wild.
172  */
173 struct obj_cgroup {
174 	struct percpu_ref refcnt;
175 	struct mem_cgroup *memcg;
176 	atomic_t nr_charged_bytes;
177 	union {
178 		struct list_head list; /* protected by objcg_lock */
179 		struct rcu_head rcu;
180 	};
181 };
182 
183 /*
184  * The memory controller data structure. The memory controller controls both
185  * page cache and RSS per cgroup. We would eventually like to provide
186  * statistics based on the statistics developed by Rik Van Riel for clock-pro,
187  * to help the administrator determine what knobs to tune.
188  */
189 struct mem_cgroup {
190 	struct cgroup_subsys_state css;
191 
192 	/* Private memcg ID. Used to ID objects that outlive the cgroup */
193 	struct mem_cgroup_id id;
194 
195 	/* Accounted resources */
196 	struct page_counter memory;		/* Both v1 & v2 */
197 
198 	union {
199 		struct page_counter swap;	/* v2 only */
200 		struct page_counter memsw;	/* v1 only */
201 	};
202 
203 	/* registered local peak watchers */
204 	struct list_head memory_peaks;
205 	struct list_head swap_peaks;
206 	spinlock_t	 peaks_lock;
207 
208 	/* Range enforcement for interrupt charges */
209 	struct work_struct high_work;
210 
211 #ifdef CONFIG_ZSWAP
212 	unsigned long zswap_max;
213 
214 	/*
215 	 * Prevent pages from this memcg from being written back from zswap to
216 	 * swap, and from being swapped out on zswap store failures.
217 	 */
218 	bool zswap_writeback;
219 #endif
220 
221 	/* vmpressure notifications */
222 	struct vmpressure vmpressure;
223 
224 	/*
225 	 * Should the OOM killer kill all belonging tasks, had it kill one?
226 	 * Should the OOM killer kill all belonging tasks, if it has to kill one?
227 	bool oom_group;
228 
229 	int swappiness;
230 
231 	/* memory.events and memory.events.local */
232 	struct cgroup_file events_file;
233 	struct cgroup_file events_local_file;
234 
235 	/* handle for "memory.swap.events" */
236 	struct cgroup_file swap_events_file;
237 
238 	/* memory.stat */
239 	struct memcg_vmstats	*vmstats;
240 
241 	/* memory.events */
242 	atomic_long_t		memory_events[MEMCG_NR_MEMORY_EVENTS];
243 	atomic_long_t		memory_events_local[MEMCG_NR_MEMORY_EVENTS];
244 
245 #ifdef CONFIG_MEMCG_NMI_SAFETY_REQUIRES_ATOMIC
246 	/* MEMCG_KMEM for nmi context */
247 	atomic_t		kmem_stat;
248 #endif
249 	/*
250 	 * Hint of reclaim pressure for socket memory management. Note
251 	 * that this indicator should NOT be used in legacy cgroup mode
252 	 * where socket memory is accounted/charged separately.
253 	 */
254 	u64			socket_pressure;
255 #if BITS_PER_LONG < 64
256 	seqlock_t		socket_pressure_seqlock;
257 #endif
258 	int kmemcg_id;
259 	/*
260 	 * memcg->objcg is wiped out as a part of the objcg reparenting
261 	 * process. memcg->orig_objcg preserves a pointer (and a reference)
262 	 * to the original objcg until the end of the memcg's lifetime.
263 	 */
264 	struct obj_cgroup __rcu	*objcg;
265 	struct obj_cgroup	*orig_objcg;
266 	/* list of inherited objcgs, protected by objcg_lock */
267 	struct list_head objcg_list;
268 
269 	struct memcg_vmstats_percpu __percpu *vmstats_percpu;
270 
271 #ifdef CONFIG_CGROUP_WRITEBACK
272 	struct list_head cgwb_list;
273 	struct wb_domain cgwb_domain;
274 	struct memcg_cgwb_frn cgwb_frn[MEMCG_CGWB_FRN_CNT];
275 #endif
276 
277 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
278 	struct deferred_split deferred_split_queue;
279 #endif
280 
281 #ifdef CONFIG_LRU_GEN_WALKS_MMU
282 	/* per-memcg mm_struct list */
283 	struct lru_gen_mm_list mm_list;
284 #endif
285 
286 #ifdef CONFIG_MEMCG_V1
287 	/* Legacy consumer-oriented counters */
288 	struct page_counter kmem;		/* v1 only */
289 	struct page_counter tcpmem;		/* v1 only */
290 
291 	struct memcg1_events_percpu __percpu *events_percpu;
292 
293 	unsigned long soft_limit;
294 
295 	/* protected by memcg_oom_lock */
296 	bool oom_lock;
297 	int under_oom;
298 
299 	/* OOM-Killer disable */
300 	int oom_kill_disable;
301 
302 	/* protect arrays of thresholds */
303 	struct mutex thresholds_lock;
304 
305 	/* thresholds for memory usage. RCU-protected */
306 	struct mem_cgroup_thresholds thresholds;
307 
308 	/* thresholds for mem+swap usage. RCU-protected */
309 	struct mem_cgroup_thresholds memsw_thresholds;
310 
311 	/* For oom notifier event fd */
312 	struct list_head oom_notify;
313 
314 	/* Legacy tcp memory accounting */
315 	bool tcpmem_active;
316 	int tcpmem_pressure;
317 
318 	/* List of events which userspace want to receive */
319 	struct list_head event_list;
320 	spinlock_t event_list_lock;
321 #endif /* CONFIG_MEMCG_V1 */
322 
323 	struct mem_cgroup_per_node *nodeinfo[];
324 };
325 
326 /*
327  * Size of the first charge trial.
328  * TODO: it may be necessary to use larger values on big systems, or to
329  * size this dynamically based on the workload.
330  */
331 #define MEMCG_CHARGE_BATCH 64U
332 
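/*
 * Illustrative sketch only (not lifted verbatim from mm/memcontrol.c): the
 * charge path typically charges in batches of at least MEMCG_CHARGE_BATCH
 * pages and caches the surplus, so that most single-page charges avoid
 * touching the shared page counters:
 *
 *	unsigned int batch = max(MEMCG_CHARGE_BATCH, nr_pages);
 *
 *	if (page_counter_try_charge(&memcg->memory, batch, &counter))
 *		surplus = batch - nr_pages;	// cached for later charges
 */
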
333 extern struct mem_cgroup *root_mem_cgroup;
334 
335 enum page_memcg_data_flags {
336 	/* page->memcg_data is a pointer to a slabobj_ext vector */
337 	MEMCG_DATA_OBJEXTS = (1UL << 0),
338 	/* page has been accounted as a non-slab kernel page */
339 	MEMCG_DATA_KMEM = (1UL << 1),
340 	/* the next bit after the last actual flag */
341 	__NR_MEMCG_DATA_FLAGS  = (1UL << 2),
342 };
343 
344 #define __OBJEXTS_ALLOC_FAIL	MEMCG_DATA_OBJEXTS
345 #define __FIRST_OBJEXT_FLAG	__NR_MEMCG_DATA_FLAGS
346 
347 #else /* CONFIG_MEMCG */
348 
349 #define __OBJEXTS_ALLOC_FAIL	(1UL << 0)
350 #define __FIRST_OBJEXT_FLAG	(1UL << 0)
351 
352 #endif /* CONFIG_MEMCG */
353 
354 enum objext_flags {
355 	/*
356 	 * Use bit 0 with zero other bits to signal that slabobj_ext vector
357 	 * failed to allocate. The same bit 0 with valid upper bits means
358 	 * MEMCG_DATA_OBJEXTS.
359 	 */
360 	OBJEXTS_ALLOC_FAIL = __OBJEXTS_ALLOC_FAIL,
361 	/* slabobj_ext vector allocated with kmalloc_nolock() */
362 	OBJEXTS_NOSPIN_ALLOC = __FIRST_OBJEXT_FLAG,
363 	/* the next bit after the last actual flag */
364 	__NR_OBJEXTS_FLAGS  = (__FIRST_OBJEXT_FLAG << 1),
365 };
366 
367 #define OBJEXTS_FLAGS_MASK (__NR_OBJEXTS_FLAGS - 1)
368 
369 #ifdef CONFIG_MEMCG
370 
371 static inline bool folio_memcg_kmem(struct folio *folio);
372 
373 /*
374  * After the initialization objcg->memcg is always pointing at
375  * a valid memcg, but can be atomically swapped to the parent memcg.
376  *
377  * The caller must ensure that the returned memcg won't be released.
378  */
379 static inline struct mem_cgroup *obj_cgroup_memcg(struct obj_cgroup *objcg)
380 {
381 	lockdep_assert_once(rcu_read_lock_held() || lockdep_is_held(&cgroup_mutex));
382 	return READ_ONCE(objcg->memcg);
383 }
384 
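/*
 * Hedged usage sketch: the RCU read lock (or cgroup_mutex) both satisfies
 * the lockdep assertion above and keeps the returned memcg alive, but only
 * for the duration of the critical section:
 *
 *	rcu_read_lock();
 *	memcg = obj_cgroup_memcg(objcg);
 *	... use memcg, no sleeping ...
 *	rcu_read_unlock();
 *
 * If the memcg is needed outside of the RCU section, take a reference
 * instead, e.g. via get_mem_cgroup_from_objcg() below.
 */
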
385 /*
386  * __folio_memcg - Get the memory cgroup associated with a non-kmem folio
387  * @folio: Pointer to the folio.
388  *
389  * Returns a pointer to the memory cgroup associated with the folio,
390  * or NULL. This function assumes that the folio is known to have a
391  * proper memory cgroup pointer. It's not safe to call this function
392  * on some types of folios, e.g. slab folios, ex-slab folios or
393  * kmem folios.
394  */
395 static inline struct mem_cgroup *__folio_memcg(struct folio *folio)
396 {
397 	unsigned long memcg_data = folio->memcg_data;
398 
399 	VM_BUG_ON_FOLIO(folio_test_slab(folio), folio);
400 	VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_OBJEXTS, folio);
401 	VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_KMEM, folio);
402 
403 	return (struct mem_cgroup *)(memcg_data & ~OBJEXTS_FLAGS_MASK);
404 }
405 
406 /*
407  * __folio_objcg - get the object cgroup associated with a kmem folio.
408  * @folio: Pointer to the folio.
409  *
410  * Returns a pointer to the object cgroup associated with the folio,
411  * or NULL. This function assumes that the folio is known to have a
412  * proper object cgroup pointer. It's not safe to call this function
413  * on some types of folios, e.g. slab folios, ex-slab folios or
414  * LRU folios.
415  */
416 static inline struct obj_cgroup *__folio_objcg(struct folio *folio)
417 {
418 	unsigned long memcg_data = folio->memcg_data;
419 
420 	VM_BUG_ON_FOLIO(folio_test_slab(folio), folio);
421 	VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_OBJEXTS, folio);
422 	VM_BUG_ON_FOLIO(!(memcg_data & MEMCG_DATA_KMEM), folio);
423 
424 	return (struct obj_cgroup *)(memcg_data & ~OBJEXTS_FLAGS_MASK);
425 }
426 
427 /*
428  * folio_memcg - Get the memory cgroup associated with a folio.
429  * @folio: Pointer to the folio.
430  *
431  * Returns a pointer to the memory cgroup associated with the folio,
432  * or NULL. This function assumes that the folio is known to have a
433  * proper memory cgroup pointer. It's not safe to call this function
434  * on some types of folios, e.g. slab folios or ex-slab folios.
435  *
436  * For a non-kmem folio any of the following ensures folio and memcg binding
437  * stability:
438  *
439  * - the folio lock
440  * - LRU isolation
441  * - exclusive reference
442  *
443  * For a kmem folio a caller should hold an rcu read lock to protect memcg
444  * associated with a kmem folio from being released.
445  */
446 static inline struct mem_cgroup *folio_memcg(struct folio *folio)
447 {
448 	if (folio_memcg_kmem(folio))
449 		return obj_cgroup_memcg(__folio_objcg(folio));
450 	return __folio_memcg(folio);
451 }
452 
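/*
 * Example of the stability rules above (illustrative, not a specific call
 * site): for a non-kmem folio, the folio lock is one of the ways to keep
 * the folio<->memcg binding stable while the memcg is used:
 *
 *	folio_lock(folio);
 *	memcg = folio_memcg(folio);
 *	... memcg remains valid while the folio stays locked ...
 *	folio_unlock(folio);
 */
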
453 /*
454  * folio_memcg_charged - Check whether a folio is charged to a memory cgroup.
455  * @folio: Pointer to the folio.
456  *
457  * Returns true if folio is charged to a memory cgroup, otherwise returns false.
458  */
459 static inline bool folio_memcg_charged(struct folio *folio)
460 {
461 	return folio->memcg_data != 0;
462 }
463 
464 /*
465  * folio_memcg_check - Get the memory cgroup associated with a folio.
466  * @folio: Pointer to the folio.
467  *
468  * Returns a pointer to the memory cgroup associated with the folio,
469  * or NULL. Unlike folio_memcg(), this function can take any folio
470  * as an argument. It has to be used when it's not known whether a folio
471  * has an associated memory cgroup pointer, an object cgroups vector or
472  * an object cgroup.
473  *
474  * For a non-kmem folio any of the following ensures folio and memcg binding
475  * stability:
476  *
477  * - the folio lock
478  * - LRU isolation
479  * - exclusive reference
480  *
481  * For a kmem folio a caller should hold an rcu read lock to protect memcg
482  * associated with a kmem folio from being released.
483  */
484 static inline struct mem_cgroup *folio_memcg_check(struct folio *folio)
485 {
486 	/*
487 	 * Because folio->memcg_data might be changed asynchronously
488 	 * for slabs, READ_ONCE() should be used here.
489 	 */
490 	unsigned long memcg_data = READ_ONCE(folio->memcg_data);
491 
492 	if (memcg_data & MEMCG_DATA_OBJEXTS)
493 		return NULL;
494 
495 	if (memcg_data & MEMCG_DATA_KMEM) {
496 		struct obj_cgroup *objcg;
497 
498 		objcg = (void *)(memcg_data & ~OBJEXTS_FLAGS_MASK);
499 		return obj_cgroup_memcg(objcg);
500 	}
501 
502 	return (struct mem_cgroup *)(memcg_data & ~OBJEXTS_FLAGS_MASK);
503 }
504 
505 static inline struct mem_cgroup *page_memcg_check(struct page *page)
506 {
507 	if (PageTail(page))
508 		return NULL;
509 	return folio_memcg_check((struct folio *)page);
510 }
511 
512 static inline struct mem_cgroup *get_mem_cgroup_from_objcg(struct obj_cgroup *objcg)
513 {
514 	struct mem_cgroup *memcg;
515 
516 	rcu_read_lock();
517 retry:
518 	memcg = obj_cgroup_memcg(objcg);
519 	if (unlikely(!css_tryget(&memcg->css)))
520 		goto retry;
521 	rcu_read_unlock();
522 
523 	return memcg;
524 }
525 
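/*
 * Sketch of the typical pairing (assumption, not taken from a specific
 * caller): the css reference obtained here pins the memcg beyond the RCU
 * section, and must be dropped with mem_cgroup_put() when done:
 *
 *	memcg = get_mem_cgroup_from_objcg(objcg);
 *	... may sleep, memcg cannot go away ...
 *	mem_cgroup_put(memcg);
 */
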
526 /*
527  * folio_memcg_kmem - Check if the folio has the memcg_kmem flag set.
528  * @folio: Pointer to the folio.
529  *
530  * Checks if the folio has MemcgKmem flag set. The caller must ensure
531  * that the folio has an associated memory cgroup. It's not safe to call
532  * this function against some types of folios, e.g. slab folios.
533  */
534 static inline bool folio_memcg_kmem(struct folio *folio)
535 {
536 	VM_BUG_ON_PGFLAGS(PageTail(&folio->page), &folio->page);
537 	VM_BUG_ON_FOLIO(folio->memcg_data & MEMCG_DATA_OBJEXTS, folio);
538 	return folio->memcg_data & MEMCG_DATA_KMEM;
539 }
540 
541 static inline bool PageMemcgKmem(struct page *page)
542 {
543 	return folio_memcg_kmem(page_folio(page));
544 }
545 
546 static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
547 {
548 	return (memcg == root_mem_cgroup);
549 }
550 
551 static inline bool mem_cgroup_disabled(void)
552 {
553 	return !cgroup_subsys_enabled(memory_cgrp_subsys);
554 }
555 
556 static inline void mem_cgroup_protection(struct mem_cgroup *root,
557 					 struct mem_cgroup *memcg,
558 					 unsigned long *min,
559 					 unsigned long *low)
560 {
561 	*min = *low = 0;
562 
563 	if (mem_cgroup_disabled())
564 		return;
565 
566 	/*
567 	 * There is no reclaim protection applied to a targeted reclaim.
568 	 * We are special casing this specific case here because
569 	 * mem_cgroup_calculate_protection is not robust enough to keep
570 	 * the protection invariant for calculated effective values for
571 	 * parallel reclaimers with different reclaim targets. This is
572 	 * especially a problem for tail memcgs (as they have pages on LRU)
573 	 * which would want to have effective values 0 for targeted reclaim
574 	 * but a different value for external reclaim.
575 	 *
576 	 * Example
577 	 * Let's have global and A's reclaim in parallel:
578 	 *  |
579 	 *  A (low=2G, usage = 3G, max = 3G, children_low_usage = 1.5G)
580 	 *  |\
581 	 *  | C (low = 1G, usage = 2.5G)
582 	 *  B (low = 1G, usage = 0.5G)
583 	 *
584 	 * For the global reclaim
585 	 * A.elow = A.low
586 	 * B.elow = min(B.usage, B.low) because children_low_usage <= A.elow
587 	 * C.elow = min(C.usage, C.low)
588 	 *
589 	 * With the effective values resetting we have A reclaim
590 	 * A.elow = 0
591 	 * B.elow = B.low
592 	 * C.elow = C.low
593 	 *
594 	 * If the global reclaim races with A's reclaim then
595 	 * B.elow = C.elow = 0 (because children_low_usage > A.elow)
596 	 * is possible and reclaiming B would be violating the protection.
597 	 *
598 	 */
599 	if (root == memcg)
600 		return;
601 
602 	*min = READ_ONCE(memcg->memory.emin);
603 	*low = READ_ONCE(memcg->memory.elow);
604 }
605 
606 void mem_cgroup_calculate_protection(struct mem_cgroup *root,
607 				     struct mem_cgroup *memcg);
608 
609 static inline bool mem_cgroup_unprotected(struct mem_cgroup *target,
610 					  struct mem_cgroup *memcg)
611 {
612 	/*
613 	 * The root memcg doesn't account charges, and doesn't support
614 	 * protection. The target memcg's protection is ignored, see
615 	 * mem_cgroup_calculate_protection() and mem_cgroup_protection()
616 	 */
617 	return mem_cgroup_disabled() || mem_cgroup_is_root(memcg) ||
618 		memcg == target;
619 }
620 
621 static inline bool mem_cgroup_below_low(struct mem_cgroup *target,
622 					struct mem_cgroup *memcg)
623 {
624 	if (mem_cgroup_unprotected(target, memcg))
625 		return false;
626 
627 	return READ_ONCE(memcg->memory.elow) >=
628 		page_counter_read(&memcg->memory);
629 }
630 
631 static inline bool mem_cgroup_below_min(struct mem_cgroup *target,
632 					struct mem_cgroup *memcg)
633 {
634 	if (mem_cgroup_unprotected(target, memcg))
635 		return false;
636 
637 	return READ_ONCE(memcg->memory.emin) >=
638 		page_counter_read(&memcg->memory);
639 }
640 
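/*
 * Hedged sketch of how a reclaimer may consume the protection helpers above
 * (loosely modelled on the shrink path; the surrounding loop and variable
 * names are illustrative):
 *
 *	mem_cgroup_calculate_protection(target_memcg, memcg);
 *	if (mem_cgroup_below_min(target_memcg, memcg)) {
 *		// hard protection: skip this memcg entirely
 *	} else if (mem_cgroup_below_low(target_memcg, memcg)) {
 *		// soft protection: skip unless nothing else is reclaimable
 *	}
 */
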
641 int __mem_cgroup_charge(struct folio *folio, struct mm_struct *mm, gfp_t gfp);
642 
643 /**
644  * mem_cgroup_charge - Charge a newly allocated folio to a cgroup.
645  * @folio: Folio to charge.
646  * @mm: mm context of the allocating task.
647  * @gfp: Reclaim mode.
648  *
649  * Try to charge @folio to the memcg that @mm belongs to, reclaiming
650  * pages according to @gfp if necessary.  If @mm is NULL, try to
651  * charge to the active memcg.
652  *
653  * Do not use this for folios allocated for swapin.
654  *
655  * Return: 0 on success. Otherwise, an error code is returned.
656  */
657 static inline int mem_cgroup_charge(struct folio *folio, struct mm_struct *mm,
658 				    gfp_t gfp)
659 {
660 	if (mem_cgroup_disabled())
661 		return 0;
662 	return __mem_cgroup_charge(folio, mm, gfp);
663 }
664 
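/*
 * Minimal usage sketch (assumes a newly allocated folio; error handling
 * beyond the charge itself is elided):
 *
 *	folio = folio_alloc(gfp, order);
 *	if (folio && mem_cgroup_charge(folio, mm, gfp)) {
 *		folio_put(folio);
 *		folio = NULL;
 *	}
 *
 * The charge is dropped again on the free path via mem_cgroup_uncharge()
 * (see below), normally from the generic folio freeing code.
 */
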
665 int mem_cgroup_charge_hugetlb(struct folio *folio, gfp_t gfp);
666 
667 int mem_cgroup_swapin_charge_folio(struct folio *folio, struct mm_struct *mm,
668 				  gfp_t gfp, swp_entry_t entry);
669 
670 void __mem_cgroup_uncharge(struct folio *folio);
671 
672 /**
673  * mem_cgroup_uncharge - Uncharge a folio.
674  * @folio: Folio to uncharge.
675  *
676  * Uncharge a folio previously charged with mem_cgroup_charge().
677  */
678 static inline void mem_cgroup_uncharge(struct folio *folio)
679 {
680 	if (mem_cgroup_disabled())
681 		return;
682 	__mem_cgroup_uncharge(folio);
683 }
684 
685 void __mem_cgroup_uncharge_folios(struct folio_batch *folios);
686 static inline void mem_cgroup_uncharge_folios(struct folio_batch *folios)
687 {
688 	if (mem_cgroup_disabled())
689 		return;
690 	__mem_cgroup_uncharge_folios(folios);
691 }
692 
693 void mem_cgroup_replace_folio(struct folio *old, struct folio *new);
694 void mem_cgroup_migrate(struct folio *old, struct folio *new);
695 
696 /**
697  * mem_cgroup_lruvec - get the lru list vector for a memcg & node
698  * @memcg: memcg of the wanted lruvec
699  * @pgdat: pglist_data
700  *
701  * Returns the lru list vector holding pages for a given @memcg &
702  * @pgdat combination. This can be the node lruvec, if the memory
703  * controller is disabled.
704  */
705 static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
706 					       struct pglist_data *pgdat)
707 {
708 	struct mem_cgroup_per_node *mz;
709 	struct lruvec *lruvec;
710 
711 	if (mem_cgroup_disabled()) {
712 		lruvec = &pgdat->__lruvec;
713 		goto out;
714 	}
715 
716 	if (!memcg)
717 		memcg = root_mem_cgroup;
718 
719 	mz = memcg->nodeinfo[pgdat->node_id];
720 	lruvec = &mz->lruvec;
721 out:
722 	/*
723 	 * Since a node can be onlined after the mem_cgroup was created,
724 	 * we have to be prepared to initialize lruvec->pgdat here;
725 	 * and if offlined then reonlined, we need to reinitialize it.
726 	 */
727 	if (unlikely(lruvec->pgdat != pgdat))
728 		lruvec->pgdat = pgdat;
729 	return lruvec;
730 }
731 
732 /**
733  * folio_lruvec - return lruvec for isolating/putting an LRU folio
734  * @folio: Pointer to the folio.
735  *
736  * This function relies on folio->mem_cgroup being stable.
737  */
738 static inline struct lruvec *folio_lruvec(struct folio *folio)
739 {
740 	struct mem_cgroup *memcg = folio_memcg(folio);
741 
742 	VM_WARN_ON_ONCE_FOLIO(!memcg && !mem_cgroup_disabled(), folio);
743 	return mem_cgroup_lruvec(memcg, folio_pgdat(folio));
744 }
745 
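/*
 * Illustrative example: resolving a lruvec to read per-memcg, per-node LRU
 * statistics. With the controller disabled this transparently falls back to
 * the node's lruvec:
 *
 *	struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);
 *	unsigned long file = lruvec_page_state(lruvec, NR_ACTIVE_FILE);
 */
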
746 struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
747 
748 struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm);
749 
750 struct mem_cgroup *get_mem_cgroup_from_current(void);
751 
752 struct mem_cgroup *get_mem_cgroup_from_folio(struct folio *folio);
753 
754 struct lruvec *folio_lruvec_lock(struct folio *folio);
755 struct lruvec *folio_lruvec_lock_irq(struct folio *folio);
756 struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio,
757 						unsigned long *flags);
758 
759 #ifdef CONFIG_DEBUG_VM
760 void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio);
761 #else
762 static inline
763 void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio)
764 {
765 }
766 #endif
767 
768 static inline
769 struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css){
770 	return css ? container_of(css, struct mem_cgroup, css) : NULL;
771 }
772 
773 static inline bool obj_cgroup_tryget(struct obj_cgroup *objcg)
774 {
775 	return percpu_ref_tryget(&objcg->refcnt);
776 }
777 
778 static inline void obj_cgroup_get(struct obj_cgroup *objcg)
779 {
780 	percpu_ref_get(&objcg->refcnt);
781 }
782 
783 static inline void obj_cgroup_get_many(struct obj_cgroup *objcg,
784 				       unsigned long nr)
785 {
786 	percpu_ref_get_many(&objcg->refcnt, nr);
787 }
788 
789 static inline void obj_cgroup_put(struct obj_cgroup *objcg)
790 {
791 	if (objcg)
792 		percpu_ref_put(&objcg->refcnt);
793 }
794 
795 static inline bool mem_cgroup_tryget(struct mem_cgroup *memcg)
796 {
797 	return !memcg || css_tryget(&memcg->css);
798 }
799 
800 static inline bool mem_cgroup_tryget_online(struct mem_cgroup *memcg)
801 {
802 	return !memcg || css_tryget_online(&memcg->css);
803 }
804 
805 static inline void mem_cgroup_put(struct mem_cgroup *memcg)
806 {
807 	if (memcg)
808 		css_put(&memcg->css);
809 }
810 
811 #define mem_cgroup_from_counter(counter, member)	\
812 	container_of(counter, struct mem_cgroup, member)
813 
814 struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
815 				   struct mem_cgroup *,
816 				   struct mem_cgroup_reclaim_cookie *);
817 void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
818 void mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
819 			   int (*)(struct task_struct *, void *), void *arg);
820 
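/*
 * The usual hierarchy walk with mem_cgroup_iter() looks roughly like this
 * (sketch; see also the kernel-doc above the implementation in
 * mm/memcontrol.c):
 *
 *	memcg = mem_cgroup_iter(root, NULL, NULL);
 *	do {
 *		...
 *	} while ((memcg = mem_cgroup_iter(root, memcg, NULL)));
 *
 * A walk that is abandoned early must call mem_cgroup_iter_break(root, memcg)
 * so the reference on the last visited memcg is dropped.
 */
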
821 static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
822 {
823 	if (mem_cgroup_disabled())
824 		return 0;
825 
826 	return memcg->id.id;
827 }
828 struct mem_cgroup *mem_cgroup_from_id(unsigned short id);
829 
830 #ifdef CONFIG_SHRINKER_DEBUG
831 static inline unsigned long mem_cgroup_ino(struct mem_cgroup *memcg)
832 {
833 	return memcg ? cgroup_ino(memcg->css.cgroup) : 0;
834 }
835 
836 struct mem_cgroup *mem_cgroup_get_from_ino(unsigned long ino);
837 #endif
838 
839 static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m)
840 {
841 	return mem_cgroup_from_css(seq_css(m));
842 }
843 
844 static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
845 {
846 	struct mem_cgroup_per_node *mz;
847 
848 	if (mem_cgroup_disabled())
849 		return NULL;
850 
851 	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
852 	return mz->memcg;
853 }
854 
855 /**
856  * parent_mem_cgroup - find the accounting parent of a memcg
857  * @memcg: memcg whose parent to find
858  *
859  * Returns the parent memcg, or NULL if this is the root.
860  */
861 static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
862 {
863 	return mem_cgroup_from_css(memcg->css.parent);
864 }
865 
866 static inline bool mem_cgroup_is_descendant(struct mem_cgroup *memcg,
867 			      struct mem_cgroup *root)
868 {
869 	if (root == memcg)
870 		return true;
871 	return cgroup_is_descendant(memcg->css.cgroup, root->css.cgroup);
872 }
873 
874 static inline bool mm_match_cgroup(struct mm_struct *mm,
875 				   struct mem_cgroup *memcg)
876 {
877 	struct mem_cgroup *task_memcg;
878 	bool match = false;
879 
880 	rcu_read_lock();
881 	task_memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
882 	if (task_memcg)
883 		match = mem_cgroup_is_descendant(task_memcg, memcg);
884 	rcu_read_unlock();
885 	return match;
886 }
887 
888 struct cgroup_subsys_state *mem_cgroup_css_from_folio(struct folio *folio);
889 ino_t page_cgroup_ino(struct page *page);
890 
891 static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
892 {
893 	if (mem_cgroup_disabled())
894 		return true;
895 	return !!(memcg->css.flags & CSS_ONLINE);
896 }
897 
898 void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
899 		int zid, int nr_pages);
900 
901 static inline
902 unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
903 		enum lru_list lru, int zone_idx)
904 {
905 	struct mem_cgroup_per_node *mz;
906 
907 	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
908 	return READ_ONCE(mz->lru_zone_size[zone_idx][lru]);
909 }
910 
911 void __mem_cgroup_handle_over_high(gfp_t gfp_mask);
912 
913 static inline void mem_cgroup_handle_over_high(gfp_t gfp_mask)
914 {
915 	if (unlikely(current->memcg_nr_pages_over_high))
916 		__mem_cgroup_handle_over_high(gfp_mask);
917 }
918 
919 unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg);
920 
921 unsigned long mem_cgroup_size(struct mem_cgroup *memcg);
922 
923 void mem_cgroup_print_oom_context(struct mem_cgroup *memcg,
924 				struct task_struct *p);
925 
926 void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg);
927 
928 struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim,
929 					    struct mem_cgroup *oom_domain);
930 void mem_cgroup_print_oom_group(struct mem_cgroup *memcg);
931 
932 /* idx can be of type enum memcg_stat_item or node_stat_item */
933 void mod_memcg_state(struct mem_cgroup *memcg,
934 		     enum memcg_stat_item idx, int val);
935 
936 static inline void mod_memcg_page_state(struct page *page,
937 					enum memcg_stat_item idx, int val)
938 {
939 	struct mem_cgroup *memcg;
940 
941 	if (mem_cgroup_disabled())
942 		return;
943 
944 	rcu_read_lock();
945 	memcg = folio_memcg(page_folio(page));
946 	if (memcg)
947 		mod_memcg_state(memcg, idx, val);
948 	rcu_read_unlock();
949 }
950 
951 unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx);
952 unsigned long lruvec_page_state(struct lruvec *lruvec, enum node_stat_item idx);
953 unsigned long lruvec_page_state_local(struct lruvec *lruvec,
954 				      enum node_stat_item idx);
955 
956 void mem_cgroup_flush_stats(struct mem_cgroup *memcg);
957 void mem_cgroup_flush_stats_ratelimited(struct mem_cgroup *memcg);
958 
959 void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val);
960 
961 static inline void mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
962 					 int val)
963 {
964 	unsigned long flags;
965 
966 	local_irq_save(flags);
967 	__mod_lruvec_kmem_state(p, idx, val);
968 	local_irq_restore(flags);
969 }
970 
971 void count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
972 			unsigned long count);
973 
974 static inline void count_memcg_folio_events(struct folio *folio,
975 		enum vm_event_item idx, unsigned long nr)
976 {
977 	struct mem_cgroup *memcg = folio_memcg(folio);
978 
979 	if (memcg)
980 		count_memcg_events(memcg, idx, nr);
981 }
982 
983 static inline void count_memcg_events_mm(struct mm_struct *mm,
984 					enum vm_event_item idx, unsigned long count)
985 {
986 	struct mem_cgroup *memcg;
987 
988 	if (mem_cgroup_disabled())
989 		return;
990 
991 	rcu_read_lock();
992 	memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
993 	if (likely(memcg))
994 		count_memcg_events(memcg, idx, count);
995 	rcu_read_unlock();
996 }
997 
998 static inline void count_memcg_event_mm(struct mm_struct *mm,
999 					enum vm_event_item idx)
1000 {
1001 	count_memcg_events_mm(mm, idx, 1);
1002 }
1003 
1004 static inline void __memcg_memory_event(struct mem_cgroup *memcg,
1005 					enum memcg_memory_event event,
1006 					bool allow_spinning)
1007 {
1008 	bool swap_event = event == MEMCG_SWAP_HIGH || event == MEMCG_SWAP_MAX ||
1009 			  event == MEMCG_SWAP_FAIL;
1010 
1011 	/* For now only MEMCG_MAX can happen with !allow_spinning context. */
1012 	VM_WARN_ON_ONCE(!allow_spinning && event != MEMCG_MAX);
1013 
1014 	atomic_long_inc(&memcg->memory_events_local[event]);
1015 	if (!swap_event && allow_spinning)
1016 		cgroup_file_notify(&memcg->events_local_file);
1017 
1018 	do {
1019 		atomic_long_inc(&memcg->memory_events[event]);
1020 		if (allow_spinning) {
1021 			if (swap_event)
1022 				cgroup_file_notify(&memcg->swap_events_file);
1023 			else
1024 				cgroup_file_notify(&memcg->events_file);
1025 		}
1026 
1027 		if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
1028 			break;
1029 		if (cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_LOCAL_EVENTS)
1030 			break;
1031 	} while ((memcg = parent_mem_cgroup(memcg)) &&
1032 		 !mem_cgroup_is_root(memcg));
1033 }
1034 
1035 static inline void memcg_memory_event(struct mem_cgroup *memcg,
1036 				      enum memcg_memory_event event)
1037 {
1038 	__memcg_memory_event(memcg, event, true);
1039 }
1040 
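/*
 * Example (illustrative): counting an OOM kill against @memcg. The helper
 * above also propagates the event to the ancestors (unless local events are
 * requested) and wakes up "memory.events" watchers:
 *
 *	memcg_memory_event(memcg, MEMCG_OOM_KILL);
 */
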
1041 static inline void memcg_memory_event_mm(struct mm_struct *mm,
1042 					 enum memcg_memory_event event)
1043 {
1044 	struct mem_cgroup *memcg;
1045 
1046 	if (mem_cgroup_disabled())
1047 		return;
1048 
1049 	rcu_read_lock();
1050 	memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
1051 	if (likely(memcg))
1052 		memcg_memory_event(memcg, event);
1053 	rcu_read_unlock();
1054 }
1055 
1056 void split_page_memcg(struct page *first, unsigned order);
1057 void folio_split_memcg_refs(struct folio *folio, unsigned old_order,
1058 		unsigned new_order);
1059 
1060 static inline u64 cgroup_id_from_mm(struct mm_struct *mm)
1061 {
1062 	struct mem_cgroup *memcg;
1063 	u64 id;
1064 
1065 	if (mem_cgroup_disabled())
1066 		return 0;
1067 
1068 	rcu_read_lock();
1069 	memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
1070 	if (!memcg)
1071 		memcg = root_mem_cgroup;
1072 	id = cgroup_id(memcg->css.cgroup);
1073 	rcu_read_unlock();
1074 	return id;
1075 }
1076 
1077 extern int mem_cgroup_init(void);
1078 #else /* CONFIG_MEMCG */
1079 
1080 #define MEM_CGROUP_ID_SHIFT	0
1081 
1082 #define root_mem_cgroup		(NULL)
1083 
1084 static inline struct mem_cgroup *folio_memcg(struct folio *folio)
1085 {
1086 	return NULL;
1087 }
1088 
1089 static inline bool folio_memcg_charged(struct folio *folio)
1090 {
1091 	return false;
1092 }
1093 
1094 static inline struct mem_cgroup *folio_memcg_check(struct folio *folio)
1095 {
1096 	return NULL;
1097 }
1098 
1099 static inline struct mem_cgroup *page_memcg_check(struct page *page)
1100 {
1101 	return NULL;
1102 }
1103 
1104 static inline struct mem_cgroup *get_mem_cgroup_from_objcg(struct obj_cgroup *objcg)
1105 {
1106 	return NULL;
1107 }
1108 
1109 static inline bool folio_memcg_kmem(struct folio *folio)
1110 {
1111 	return false;
1112 }
1113 
1114 static inline bool PageMemcgKmem(struct page *page)
1115 {
1116 	return false;
1117 }
1118 
1119 static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
1120 {
1121 	return true;
1122 }
1123 
1124 static inline bool mem_cgroup_disabled(void)
1125 {
1126 	return true;
1127 }
1128 
1129 static inline void memcg_memory_event(struct mem_cgroup *memcg,
1130 				      enum memcg_memory_event event)
1131 {
1132 }
1133 
1134 static inline void memcg_memory_event_mm(struct mm_struct *mm,
1135 					 enum memcg_memory_event event)
1136 {
1137 }
1138 
1139 static inline void mem_cgroup_protection(struct mem_cgroup *root,
1140 					 struct mem_cgroup *memcg,
1141 					 unsigned long *min,
1142 					 unsigned long *low)
1143 {
1144 	*min = *low = 0;
1145 }
1146 
1147 static inline void mem_cgroup_calculate_protection(struct mem_cgroup *root,
1148 						   struct mem_cgroup *memcg)
1149 {
1150 }
1151 
1152 static inline bool mem_cgroup_unprotected(struct mem_cgroup *target,
1153 					  struct mem_cgroup *memcg)
1154 {
1155 	return true;
1156 }
1157 static inline bool mem_cgroup_below_low(struct mem_cgroup *target,
1158 					struct mem_cgroup *memcg)
1159 {
1160 	return false;
1161 }
1162 
1163 static inline bool mem_cgroup_below_min(struct mem_cgroup *target,
1164 					struct mem_cgroup *memcg)
1165 {
1166 	return false;
1167 }
1168 
1169 static inline int mem_cgroup_charge(struct folio *folio,
1170 		struct mm_struct *mm, gfp_t gfp)
1171 {
1172 	return 0;
1173 }
1174 
1175 static inline int mem_cgroup_charge_hugetlb(struct folio *folio, gfp_t gfp)
1176 {
1177 	return 0;
1178 }
1179 
1180 static inline int mem_cgroup_swapin_charge_folio(struct folio *folio,
1181 			struct mm_struct *mm, gfp_t gfp, swp_entry_t entry)
1182 {
1183 	return 0;
1184 }
1185 
1186 static inline void mem_cgroup_uncharge(struct folio *folio)
1187 {
1188 }
1189 
1190 static inline void mem_cgroup_uncharge_folios(struct folio_batch *folios)
1191 {
1192 }
1193 
1194 static inline void mem_cgroup_replace_folio(struct folio *old,
1195 		struct folio *new)
1196 {
1197 }
1198 
1199 static inline void mem_cgroup_migrate(struct folio *old, struct folio *new)
1200 {
1201 }
1202 
1203 static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
1204 					       struct pglist_data *pgdat)
1205 {
1206 	return &pgdat->__lruvec;
1207 }
1208 
1209 static inline struct lruvec *folio_lruvec(struct folio *folio)
1210 {
1211 	struct pglist_data *pgdat = folio_pgdat(folio);
1212 	return &pgdat->__lruvec;
1213 }
1214 
1215 static inline
1216 void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio)
1217 {
1218 }
1219 
1220 static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
1221 {
1222 	return NULL;
1223 }
1224 
1225 static inline bool mm_match_cgroup(struct mm_struct *mm,
1226 		struct mem_cgroup *memcg)
1227 {
1228 	return true;
1229 }
1230 
1231 static inline struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
1232 {
1233 	return NULL;
1234 }
1235 
1236 static inline struct mem_cgroup *get_mem_cgroup_from_current(void)
1237 {
1238 	return NULL;
1239 }
1240 
1241 static inline struct mem_cgroup *get_mem_cgroup_from_folio(struct folio *folio)
1242 {
1243 	return NULL;
1244 }
1245 
1246 static inline
1247 struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css)
1248 {
1249 	return NULL;
1250 }
1251 
1252 static inline void obj_cgroup_get(struct obj_cgroup *objcg)
1253 {
1254 }
1255 
1256 static inline void obj_cgroup_put(struct obj_cgroup *objcg)
1257 {
1258 }
1259 
1260 static inline bool mem_cgroup_tryget(struct mem_cgroup *memcg)
1261 {
1262 	return true;
1263 }
1264 
1265 static inline bool mem_cgroup_tryget_online(struct mem_cgroup *memcg)
1266 {
1267 	return true;
1268 }
1269 
1270 static inline void mem_cgroup_put(struct mem_cgroup *memcg)
1271 {
1272 }
1273 
1274 static inline struct lruvec *folio_lruvec_lock(struct folio *folio)
1275 {
1276 	struct pglist_data *pgdat = folio_pgdat(folio);
1277 
1278 	spin_lock(&pgdat->__lruvec.lru_lock);
1279 	return &pgdat->__lruvec;
1280 }
1281 
1282 static inline struct lruvec *folio_lruvec_lock_irq(struct folio *folio)
1283 {
1284 	struct pglist_data *pgdat = folio_pgdat(folio);
1285 
1286 	spin_lock_irq(&pgdat->__lruvec.lru_lock);
1287 	return &pgdat->__lruvec;
1288 }
1289 
1290 static inline struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio,
1291 		unsigned long *flagsp)
1292 {
1293 	struct pglist_data *pgdat = folio_pgdat(folio);
1294 
1295 	spin_lock_irqsave(&pgdat->__lruvec.lru_lock, *flagsp);
1296 	return &pgdat->__lruvec;
1297 }
1298 
1299 static inline struct mem_cgroup *
1300 mem_cgroup_iter(struct mem_cgroup *root,
1301 		struct mem_cgroup *prev,
1302 		struct mem_cgroup_reclaim_cookie *reclaim)
1303 {
1304 	return NULL;
1305 }
1306 
1307 static inline void mem_cgroup_iter_break(struct mem_cgroup *root,
1308 					 struct mem_cgroup *prev)
1309 {
1310 }
1311 
1312 static inline void mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
1313 		int (*fn)(struct task_struct *, void *), void *arg)
1314 {
1315 }
1316 
1317 static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
1318 {
1319 	return 0;
1320 }
1321 
1322 static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
1323 {
1324 	WARN_ON_ONCE(id);
1325 	/* XXX: This should always return root_mem_cgroup */
1326 	return NULL;
1327 }
1328 
1329 #ifdef CONFIG_SHRINKER_DEBUG
1330 static inline unsigned long mem_cgroup_ino(struct mem_cgroup *memcg)
1331 {
1332 	return 0;
1333 }
1334 
1335 static inline struct mem_cgroup *mem_cgroup_get_from_ino(unsigned long ino)
1336 {
1337 	return NULL;
1338 }
1339 #endif
1340 
1341 static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m)
1342 {
1343 	return NULL;
1344 }
1345 
1346 static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
1347 {
1348 	return NULL;
1349 }
1350 
1351 static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
1352 {
1353 	return true;
1354 }
1355 
1356 static inline
1357 unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
1358 		enum lru_list lru, int zone_idx)
1359 {
1360 	return 0;
1361 }
1362 
1363 static inline unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
1364 {
1365 	return 0;
1366 }
1367 
1368 static inline unsigned long mem_cgroup_size(struct mem_cgroup *memcg)
1369 {
1370 	return 0;
1371 }
1372 
1373 static inline void
1374 mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
1375 {
1376 }
1377 
1378 static inline void
1379 mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
1380 {
1381 }
1382 
1383 static inline void mem_cgroup_handle_over_high(gfp_t gfp_mask)
1384 {
1385 }
1386 
1387 static inline struct mem_cgroup *mem_cgroup_get_oom_group(
1388 	struct task_struct *victim, struct mem_cgroup *oom_domain)
1389 {
1390 	return NULL;
1391 }
1392 
1393 static inline void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
1394 {
1395 }
1396 
1397 static inline void mod_memcg_state(struct mem_cgroup *memcg,
1398 				   enum memcg_stat_item idx,
1399 				   int nr)
1400 {
1401 }
1402 
1403 static inline void mod_memcg_page_state(struct page *page,
1404 					enum memcg_stat_item idx, int val)
1405 {
1406 }
1407 
1408 static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
1409 {
1410 	return 0;
1411 }
1412 
1413 static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
1414 					      enum node_stat_item idx)
1415 {
1416 	return node_page_state(lruvec_pgdat(lruvec), idx);
1417 }
1418 
1419 static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
1420 						    enum node_stat_item idx)
1421 {
1422 	return node_page_state(lruvec_pgdat(lruvec), idx);
1423 }
1424 
1425 static inline void mem_cgroup_flush_stats(struct mem_cgroup *memcg)
1426 {
1427 }
1428 
1429 static inline void mem_cgroup_flush_stats_ratelimited(struct mem_cgroup *memcg)
1430 {
1431 }
1432 
1433 static inline void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
1434 					   int val)
1435 {
1436 	struct page *page = virt_to_head_page(p);
1437 
1438 	__mod_node_page_state(page_pgdat(page), idx, val);
1439 }
1440 
1441 static inline void mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
1442 					 int val)
1443 {
1444 	struct page *page = virt_to_head_page(p);
1445 
1446 	mod_node_page_state(page_pgdat(page), idx, val);
1447 }
1448 
1449 static inline void count_memcg_events(struct mem_cgroup *memcg,
1450 					enum vm_event_item idx,
1451 					unsigned long count)
1452 {
1453 }
1454 
1455 static inline void count_memcg_folio_events(struct folio *folio,
1456 		enum vm_event_item idx, unsigned long nr)
1457 {
1458 }
1459 
1460 static inline void count_memcg_events_mm(struct mm_struct *mm,
1461 					enum vm_event_item idx, unsigned long count)
1462 {
1463 }
1464 
1465 static inline
1466 void count_memcg_event_mm(struct mm_struct *mm, enum vm_event_item idx)
1467 {
1468 }
1469 
1470 static inline void split_page_memcg(struct page *first, unsigned order)
1471 {
1472 }
1473 
1474 static inline void folio_split_memcg_refs(struct folio *folio,
1475 		unsigned old_order, unsigned new_order)
1476 {
1477 }
1478 
1479 static inline u64 cgroup_id_from_mm(struct mm_struct *mm)
1480 {
1481 	return 0;
1482 }
1483 
1484 static inline int mem_cgroup_init(void) { return 0; }
1485 #endif /* CONFIG_MEMCG */
1486 
1487 /*
1488  * Extended information for slab objects stored as an array in page->memcg_data
1489  * if MEMCG_DATA_OBJEXTS is set.
1490  */
1491 struct slabobj_ext {
1492 #ifdef CONFIG_MEMCG
1493 	struct obj_cgroup *objcg;
1494 #endif
1495 #ifdef CONFIG_MEM_ALLOC_PROFILING
1496 	union codetag_ref ref;
1497 #endif
1498 } __aligned(8);
1499 
1500 static inline void __inc_lruvec_kmem_state(void *p, enum node_stat_item idx)
1501 {
1502 	__mod_lruvec_kmem_state(p, idx, 1);
1503 }
1504 
1505 static inline void __dec_lruvec_kmem_state(void *p, enum node_stat_item idx)
1506 {
1507 	__mod_lruvec_kmem_state(p, idx, -1);
1508 }
1509 
1510 static inline struct lruvec *parent_lruvec(struct lruvec *lruvec)
1511 {
1512 	struct mem_cgroup *memcg;
1513 
1514 	memcg = lruvec_memcg(lruvec);
1515 	if (!memcg)
1516 		return NULL;
1517 	memcg = parent_mem_cgroup(memcg);
1518 	if (!memcg)
1519 		return NULL;
1520 	return mem_cgroup_lruvec(memcg, lruvec_pgdat(lruvec));
1521 }
1522 
1523 static inline void unlock_page_lruvec(struct lruvec *lruvec)
1524 {
1525 	spin_unlock(&lruvec->lru_lock);
1526 }
1527 
1528 static inline void unlock_page_lruvec_irq(struct lruvec *lruvec)
1529 {
1530 	spin_unlock_irq(&lruvec->lru_lock);
1531 }
1532 
1533 static inline void unlock_page_lruvec_irqrestore(struct lruvec *lruvec,
1534 		unsigned long flags)
1535 {
1536 	spin_unlock_irqrestore(&lruvec->lru_lock, flags);
1537 }
1538 
1539 /* Test requires a stable folio->memcg binding, see folio_memcg() */
1540 static inline bool folio_matches_lruvec(struct folio *folio,
1541 		struct lruvec *lruvec)
1542 {
1543 	return lruvec_pgdat(lruvec) == folio_pgdat(folio) &&
1544 	       lruvec_memcg(lruvec) == folio_memcg(folio);
1545 }
1546 
1547 /* Don't lock again iff page's lruvec locked */
1548 static inline struct lruvec *folio_lruvec_relock_irq(struct folio *folio,
1549 		struct lruvec *locked_lruvec)
1550 {
1551 	if (locked_lruvec) {
1552 		if (folio_matches_lruvec(folio, locked_lruvec))
1553 			return locked_lruvec;
1554 
1555 		unlock_page_lruvec_irq(locked_lruvec);
1556 	}
1557 
1558 	return folio_lruvec_lock_irq(folio);
1559 }
1560 
1561 /* Don't lock again iff folio's lruvec locked */
1562 static inline void folio_lruvec_relock_irqsave(struct folio *folio,
1563 		struct lruvec **lruvecp, unsigned long *flags)
1564 {
1565 	if (*lruvecp) {
1566 		if (folio_matches_lruvec(folio, *lruvecp))
1567 			return;
1568 
1569 		unlock_page_lruvec_irqrestore(*lruvecp, *flags);
1570 	}
1571 
1572 	*lruvecp = folio_lruvec_lock_irqsave(folio, flags);
1573 }
1574 
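/*
 * Hedged example of the batched locking pattern these relock helpers enable
 * (illustrative loop, not a specific call site):
 *
 *	struct lruvec *lruvec = NULL;
 *	unsigned long flags;
 *
 *	// for each folio in a batch:
 *	folio_lruvec_relock_irqsave(folio, &lruvec, &flags);
 *	... operate on the folio under lruvec->lru_lock ...
 *
 *	if (lruvec)
 *		unlock_page_lruvec_irqrestore(lruvec, flags);
 */
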
1575 #ifdef CONFIG_CGROUP_WRITEBACK
1576 
1577 struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb);
1578 void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
1579 			 unsigned long *pheadroom, unsigned long *pdirty,
1580 			 unsigned long *pwriteback);
1581 
1582 void mem_cgroup_track_foreign_dirty_slowpath(struct folio *folio,
1583 					     struct bdi_writeback *wb);
1584 
1585 static inline void mem_cgroup_track_foreign_dirty(struct folio *folio,
1586 						  struct bdi_writeback *wb)
1587 {
1588 	struct mem_cgroup *memcg;
1589 
1590 	if (mem_cgroup_disabled())
1591 		return;
1592 
1593 	memcg = folio_memcg(folio);
1594 	if (unlikely(memcg && &memcg->css != wb->memcg_css))
1595 		mem_cgroup_track_foreign_dirty_slowpath(folio, wb);
1596 }
1597 
1598 void mem_cgroup_flush_foreign(struct bdi_writeback *wb);
1599 
1600 #else	/* CONFIG_CGROUP_WRITEBACK */
1601 
1602 static inline struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
1603 {
1604 	return NULL;
1605 }
1606 
1607 static inline void mem_cgroup_wb_stats(struct bdi_writeback *wb,
1608 				       unsigned long *pfilepages,
1609 				       unsigned long *pheadroom,
1610 				       unsigned long *pdirty,
1611 				       unsigned long *pwriteback)
1612 {
1613 }
1614 
1615 static inline void mem_cgroup_track_foreign_dirty(struct folio *folio,
1616 						  struct bdi_writeback *wb)
1617 {
1618 }
1619 
1620 static inline void mem_cgroup_flush_foreign(struct bdi_writeback *wb)
1621 {
1622 }
1623 
1624 #endif	/* CONFIG_CGROUP_WRITEBACK */
1625 
1626 struct sock;
1627 #ifdef CONFIG_MEMCG
1628 extern struct static_key_false memcg_sockets_enabled_key;
1629 #define mem_cgroup_sockets_enabled static_branch_unlikely(&memcg_sockets_enabled_key)
1630 
1631 void mem_cgroup_sk_alloc(struct sock *sk);
1632 void mem_cgroup_sk_free(struct sock *sk);
1633 void mem_cgroup_sk_inherit(const struct sock *sk, struct sock *newsk);
1634 bool mem_cgroup_sk_charge(const struct sock *sk, unsigned int nr_pages,
1635 			  gfp_t gfp_mask);
1636 void mem_cgroup_sk_uncharge(const struct sock *sk, unsigned int nr_pages);
1637 
1638 #if BITS_PER_LONG < 64
1639 static inline void mem_cgroup_set_socket_pressure(struct mem_cgroup *memcg)
1640 {
1641 	u64 val = get_jiffies_64() + HZ;
1642 	unsigned long flags;
1643 
1644 	write_seqlock_irqsave(&memcg->socket_pressure_seqlock, flags);
1645 	memcg->socket_pressure = val;
1646 	write_sequnlock_irqrestore(&memcg->socket_pressure_seqlock, flags);
1647 }
1648 
1649 static inline u64 mem_cgroup_get_socket_pressure(struct mem_cgroup *memcg)
1650 {
1651 	unsigned int seq;
1652 	u64 val;
1653 
1654 	do {
1655 		seq = read_seqbegin(&memcg->socket_pressure_seqlock);
1656 		val = memcg->socket_pressure;
1657 	} while (read_seqretry(&memcg->socket_pressure_seqlock, seq));
1658 
1659 	return val;
1660 }
1661 #else
1662 static inline void mem_cgroup_set_socket_pressure(struct mem_cgroup *memcg)
1663 {
1664 	WRITE_ONCE(memcg->socket_pressure, jiffies + HZ);
1665 }
1666 
1667 static inline u64 mem_cgroup_get_socket_pressure(struct mem_cgroup *memcg)
1668 {
1669 	return READ_ONCE(memcg->socket_pressure);
1670 }
1671 #endif
1672 
1673 int alloc_shrinker_info(struct mem_cgroup *memcg);
1674 void free_shrinker_info(struct mem_cgroup *memcg);
1675 void set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id);
1676 void reparent_shrinker_deferred(struct mem_cgroup *memcg);
1677 #else
1678 #define mem_cgroup_sockets_enabled 0
1679 
1680 static inline void mem_cgroup_sk_alloc(struct sock *sk)
1681 {
1682 }
1683 
1684 static inline void mem_cgroup_sk_free(struct sock *sk)
1685 {
1686 }
1687 
1688 static inline void mem_cgroup_sk_inherit(const struct sock *sk, struct sock *newsk)
1689 {
1690 }
1691 
1692 static inline bool mem_cgroup_sk_charge(const struct sock *sk,
1693 					unsigned int nr_pages,
1694 					gfp_t gfp_mask)
1695 {
1696 	return false;
1697 }
1698 
1699 static inline void mem_cgroup_sk_uncharge(const struct sock *sk,
1700 					  unsigned int nr_pages)
1701 {
1702 }
1703 
1704 static inline void set_shrinker_bit(struct mem_cgroup *memcg,
1705 				    int nid, int shrinker_id)
1706 {
1707 }
1708 #endif
1709 
1710 #ifdef CONFIG_MEMCG
1711 bool mem_cgroup_kmem_disabled(void);
1712 int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order);
1713 void __memcg_kmem_uncharge_page(struct page *page, int order);
1714 
1715 /*
1716  * The returned objcg pointer is safe to use without additional
1717  * protection within a scope. The scope is defined either by
1718  * the current task (similar to the "current" global variable)
1719  * or by a set_active_memcg() pair.
1720  * Use obj_cgroup_get() to take a reference if the pointer needs
1721  * to be used outside of the local scope.
1722  */
1723 struct obj_cgroup *current_obj_cgroup(void);
1724 struct obj_cgroup *get_obj_cgroup_from_folio(struct folio *folio);
1725 
1726 static inline struct obj_cgroup *get_obj_cgroup_from_current(void)
1727 {
1728 	struct obj_cgroup *objcg = current_obj_cgroup();
1729 
1730 	if (objcg)
1731 		obj_cgroup_get(objcg);
1732 
1733 	return objcg;
1734 }
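
/*
 * Illustrative usage (a sketch, with hypothetical names): a caller that
 * stashes the objcg beyond the local scope must hold its own reference,
 * e.g.:
 *
 *	obj->objcg = get_obj_cgroup_from_current();
 *	...
 *	(later, when "obj" is torn down)
 *	if (obj->objcg)
 *		obj_cgroup_put(obj->objcg);
 *
 * "obj" stands in for whatever longer-lived structure owns the reference.
 */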
1735 
1736 int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size);
1737 void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size);
1738 
1739 extern struct static_key_false memcg_bpf_enabled_key;
1740 static inline bool memcg_bpf_enabled(void)
1741 {
1742 	return static_branch_likely(&memcg_bpf_enabled_key);
1743 }
1744 
1745 extern struct static_key_false memcg_kmem_online_key;
1746 
1747 static inline bool memcg_kmem_online(void)
1748 {
1749 	return static_branch_likely(&memcg_kmem_online_key);
1750 }
1751 
1752 static inline int memcg_kmem_charge_page(struct page *page, gfp_t gfp,
1753 					 int order)
1754 {
1755 	if (memcg_kmem_online())
1756 		return __memcg_kmem_charge_page(page, gfp, order);
1757 	return 0;
1758 }
1759 
1760 static inline void memcg_kmem_uncharge_page(struct page *page, int order)
1761 {
1762 	if (memcg_kmem_online())
1763 		__memcg_kmem_uncharge_page(page, order);
1764 }
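
/*
 * These wrappers charge/uncharge individual pages to the kmem counters of
 * the current (or active) memcg, i.e. the accounting behind __GFP_ACCOUNT
 * page allocations. The static branch turns them into cheap no-ops while
 * kmem accounting is offline.
 */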
1765 
1766 /*
1767  * A helper for accessing memcg's kmem_id, used for getting
1768  * corresponding LRU lists.
1769  */
1770 static inline int memcg_kmem_id(struct mem_cgroup *memcg)
1771 {
1772 	return memcg ? memcg->kmemcg_id : -1;
1773 }
1774 
1775 struct mem_cgroup *mem_cgroup_from_slab_obj(void *p);
1776 
1777 static inline void count_objcg_events(struct obj_cgroup *objcg,
1778 				      enum vm_event_item idx,
1779 				      unsigned long count)
1780 {
1781 	struct mem_cgroup *memcg;
1782 
1783 	if (!memcg_kmem_online())
1784 		return;
1785 
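	/*
	 * objcg->memcg can change under us due to reparenting; the RCU read
	 * lock keeps the memcg from being freed while the event is counted.
	 */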
1786 	rcu_read_lock();
1787 	memcg = obj_cgroup_memcg(objcg);
1788 	count_memcg_events(memcg, idx, count);
1789 	rcu_read_unlock();
1790 }
1791 
1792 bool mem_cgroup_node_allowed(struct mem_cgroup *memcg, int nid);
1793 
1794 #else
1795 static inline bool mem_cgroup_kmem_disabled(void)
1796 {
1797 	return true;
1798 }
1799 
1800 static inline int memcg_kmem_charge_page(struct page *page, gfp_t gfp,
1801 					 int order)
1802 {
1803 	return 0;
1804 }
1805 
1806 static inline void memcg_kmem_uncharge_page(struct page *page, int order)
1807 {
1808 }
1809 
1810 static inline int __memcg_kmem_charge_page(struct page *page, gfp_t gfp,
1811 					   int order)
1812 {
1813 	return 0;
1814 }
1815 
1816 static inline void __memcg_kmem_uncharge_page(struct page *page, int order)
1817 {
1818 }
1819 
1820 static inline struct obj_cgroup *get_obj_cgroup_from_folio(struct folio *folio)
1821 {
1822 	return NULL;
1823 }
1824 
1825 static inline bool memcg_bpf_enabled(void)
1826 {
1827 	return false;
1828 }
1829 
1830 static inline bool memcg_kmem_online(void)
1831 {
1832 	return false;
1833 }
1834 
1835 static inline int memcg_kmem_id(struct mem_cgroup *memcg)
1836 {
1837 	return -1;
1838 }
1839 
1840 static inline struct mem_cgroup *mem_cgroup_from_slab_obj(void *p)
1841 {
1842 	return NULL;
1843 }
1844 
1845 static inline void count_objcg_events(struct obj_cgroup *objcg,
1846 				      enum vm_event_item idx,
1847 				      unsigned long count)
1848 {
1849 }
1850 
1851 static inline ino_t page_cgroup_ino(struct page *page)
1852 {
1853 	return 0;
1854 }
1855 
1856 static inline bool mem_cgroup_node_allowed(struct mem_cgroup *memcg, int nid)
1857 {
1858 	return true;
1859 }
1860 #endif /* CONFIG_MEMCG */
1861 
1862 #if defined(CONFIG_MEMCG) && defined(CONFIG_ZSWAP)
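/*
 * zswap charging: obj_cgroup_may_zswap() checks the zswap limits up the
 * hierarchy before a page is compressed, and the charge/uncharge helpers
 * account the compressed size (MEMCG_ZSWAP_B) against the owning cgroup.
 */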
1863 bool obj_cgroup_may_zswap(struct obj_cgroup *objcg);
1864 void obj_cgroup_charge_zswap(struct obj_cgroup *objcg, size_t size);
1865 void obj_cgroup_uncharge_zswap(struct obj_cgroup *objcg, size_t size);
1866 bool mem_cgroup_zswap_writeback_enabled(struct mem_cgroup *memcg);
1867 #else
1868 static inline bool obj_cgroup_may_zswap(struct obj_cgroup *objcg)
1869 {
1870 	return true;
1871 }
1872 static inline void obj_cgroup_charge_zswap(struct obj_cgroup *objcg,
1873 					   size_t size)
1874 {
1875 }
1876 static inline void obj_cgroup_uncharge_zswap(struct obj_cgroup *objcg,
1877 					     size_t size)
1878 {
1879 }
1880 static inline bool mem_cgroup_zswap_writeback_enabled(struct mem_cgroup *memcg)
1881 {
1882 	/* if zswap is disabled, do not block pages going to the swapping device */
1883 	return true;
1884 }
1885 #endif
1886 
1887 
1888 /* Cgroup v1-related declarations */
1889 
1890 #ifdef CONFIG_MEMCG_V1
1891 unsigned long memcg1_soft_limit_reclaim(pg_data_t *pgdat, int order,
1892 					gfp_t gfp_mask,
1893 					unsigned long *total_scanned);
1894 
1895 bool mem_cgroup_oom_synchronize(bool wait);
1896 
1897 static inline bool task_in_memcg_oom(struct task_struct *p)
1898 {
1899 	return p->memcg_in_oom;
1900 }
1901 
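/*
 * The user-fault bracketing below supports memcg v1 OOM handling: a charge
 * that hits the limit while current->in_user_fault is set can record the OOM
 * situation in current->memcg_in_oom and return, letting the page fault path
 * finish and then resolve the OOM via mem_cgroup_oom_synchronize().
 */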
1902 static inline void mem_cgroup_enter_user_fault(void)
1903 {
1904 	WARN_ON(current->in_user_fault);
1905 	current->in_user_fault = 1;
1906 }
1907 
1908 static inline void mem_cgroup_exit_user_fault(void)
1909 {
1910 	WARN_ON(!current->in_user_fault);
1911 	current->in_user_fault = 0;
1912 }
1913 
1914 void memcg1_swapout(struct folio *folio, swp_entry_t entry);
1915 void memcg1_swapin(swp_entry_t entry, unsigned int nr_pages);
1916 
1917 #else /* CONFIG_MEMCG_V1 */
1918 static inline
1919 unsigned long memcg1_soft_limit_reclaim(pg_data_t *pgdat, int order,
1920 					gfp_t gfp_mask,
1921 					unsigned long *total_scanned)
1922 {
1923 	return 0;
1924 }
1925 
1926 static inline bool task_in_memcg_oom(struct task_struct *p)
1927 {
1928 	return false;
1929 }
1930 
1931 static inline bool mem_cgroup_oom_synchronize(bool wait)
1932 {
1933 	return false;
1934 }
1935 
1936 static inline void mem_cgroup_enter_user_fault(void)
1937 {
1938 }
1939 
1940 static inline void mem_cgroup_exit_user_fault(void)
1941 {
1942 }
1943 
1944 static inline void memcg1_swapout(struct folio *folio, swp_entry_t entry)
1945 {
1946 }
1947 
1948 static inline void memcg1_swapin(swp_entry_t entry, unsigned int nr_pages)
1949 {
1950 }
1951 
1952 #endif /* CONFIG_MEMCG_V1 */
1953 
1954 #endif /* _LINUX_MEMCONTROL_H */
1955