/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PAGE_COUNTER_H
#define _LINUX_PAGE_COUNTER_H

#include <linux/atomic.h>
#include <linux/cache.h>
#include <linux/limits.h>
#include <asm/page.h>

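/*
 * A page_counter tracks a page count against a configurable limit,
 * hierarchically: each counter may have a parent, and charges applied to a
 * counter propagate to all of its ancestors.  It is used by the memory and
 * hugetlb cgroup controllers, among other places.
 */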
struct page_counter {
        /*
         * Make sure 'usage' does not share a cacheline with any other field;
         * memcg->memory.usage is a hot member of struct mem_cgroup.
         */
        atomic_long_t usage;
        CACHELINE_PADDING(_pad1_);

        /* effective memory.min and memory.min usage tracking */
        unsigned long emin;
        atomic_long_t min_usage;
        atomic_long_t children_min_usage;

        /* effective memory.low and memory.low usage tracking */
        unsigned long elow;
        atomic_long_t low_usage;
        atomic_long_t children_low_usage;

        unsigned long watermark;
        /* Watermark since the most recent cgroup v2 style reset */
        unsigned long local_watermark;
        unsigned long failcnt;

        /* Keep all the read-mostly fields in a separate cacheline. */
        CACHELINE_PADDING(_pad2_);

        bool protection_support;
        unsigned long min;
        unsigned long low;
        unsigned long high;
        unsigned long max;
        struct page_counter *parent;
} ____cacheline_internodealigned_in_smp;

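/*
 * The counters themselves are kept in pages, but limits are frequently
 * parsed from and reported in bytes.  Presumably the 64-bit cap of
 * LONG_MAX / PAGE_SIZE keeps the byte conversion (nr_pages * PAGE_SIZE)
 * from overflowing a long, while on 32-bit that cap would be unusably
 * small (roughly 2GB with 4K pages), so the full LONG_MAX page count is
 * used there instead.
 */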
#if BITS_PER_LONG == 32
#define PAGE_COUNTER_MAX LONG_MAX
#else
#define PAGE_COUNTER_MAX (LONG_MAX / PAGE_SIZE)
#endif

/*
 * Protection is supported only for the first counter (with id 0).
 */
static inline void page_counter_init(struct page_counter *counter,
                                     struct page_counter *parent,
                                     bool protection_support)
{
        counter->usage = (atomic_long_t)ATOMIC_LONG_INIT(0);
        counter->max = PAGE_COUNTER_MAX;
        counter->parent = parent;
        counter->protection_support = protection_support;
}
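
/*
 * Illustrative sketch (not lifted from any particular caller): counters are
 * linked into a hierarchy through @parent at init time, and charges made to
 * a child later propagate to every ancestor:
 *
 *      struct page_counter parent_counter, child_counter;
 *
 *      page_counter_init(&parent_counter, NULL, true);
 *      page_counter_init(&child_counter, &parent_counter, true);
 */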

static inline unsigned long page_counter_read(struct page_counter *counter)
{
        return atomic_long_read(&counter->usage);
}

void page_counter_cancel(struct page_counter *counter, unsigned long nr_pages);
void page_counter_charge(struct page_counter *counter, unsigned long nr_pages);
bool page_counter_try_charge(struct page_counter *counter,
                             unsigned long nr_pages,
                             struct page_counter **fail);
void page_counter_uncharge(struct page_counter *counter, unsigned long nr_pages);
void page_counter_set_min(struct page_counter *counter, unsigned long nr_pages);
void page_counter_set_low(struct page_counter *counter, unsigned long nr_pages);
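
/*
 * Typical charge/uncharge pattern, as an illustrative sketch only (real
 * callers such as the memory controller wrap this in reclaim and retry
 * logic):
 *
 *      struct page_counter *fail;
 *
 *      if (!page_counter_try_charge(counter, nr_pages, &fail))
 *              return -ENOMEM; // 'fail' is the counter that hit its maximum
 *      ...
 *      page_counter_uncharge(counter, nr_pages);
 *
 * page_counter_charge() charges unconditionally, bypassing the maximum, and
 * page_counter_cancel() undoes a charge on one counter without touching its
 * ancestors.
 */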

static inline void page_counter_set_high(struct page_counter *counter,
                                         unsigned long nr_pages)
{
        WRITE_ONCE(counter->high, nr_pages);
}

int page_counter_set_max(struct page_counter *counter, unsigned long nr_pages);
int page_counter_memparse(const char *buf, const char *max,
                          unsigned long *nr_pages);
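
/*
 * A user-supplied limit is usually parsed and applied along these lines
 * (illustrative sketch; the second argument to page_counter_memparse() names
 * the string that means "no limit", e.g. "max" for the cgroup v2 files):
 *
 *      unsigned long nr_pages;
 *      int err;
 *
 *      err = page_counter_memparse(buf, "max", &nr_pages);
 *      if (err)
 *              return err;
 *      err = page_counter_set_max(counter, nr_pages);
 *      // -EBUSY: usage is still above the requested maximum;
 *      // callers typically reclaim and retry in that case.
 */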

static inline void page_counter_reset_watermark(struct page_counter *counter)
{
        unsigned long usage = page_counter_read(counter);

        /*
         * Update local_watermark first, so it's always <= watermark
         * (modulo CPU/compiler re-ordering)
         */
        counter->local_watermark = usage;
        counter->watermark = usage;
}

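/*
 * page_counter_calculate_protection() computes the effective protection
 * values ('emin'/'elow') of @counter from the memory.min / memory.low
 * settings and the usage of the counters between @root and @counter.  It is
 * expected to be called from reclaim, with @root being the counter of the
 * cgroup reclaim starts from; @recursive_protection lets children inherit
 * unused protection from their parent (the cgroup2 "memory_recursiveprot"
 * behaviour).  Without CONFIG_MEMCG it is a no-op.
 */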
#ifdef CONFIG_MEMCG
void page_counter_calculate_protection(struct page_counter *root,
                                       struct page_counter *counter,
                                       bool recursive_protection);
#else
static inline void page_counter_calculate_protection(struct page_counter *root,
                                                     struct page_counter *counter,
                                                     bool recursive_protection) {}
#endif

#endif /* _LINUX_PAGE_COUNTER_H */