1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3 * page allocation tagging
4 */
5 #ifndef _LINUX_PGALLOC_TAG_H
6 #define _LINUX_PGALLOC_TAG_H
7
8 #include <linux/alloc_tag.h>
9
10 #ifdef CONFIG_MEM_ALLOC_PROFILING
11
12 #include <linux/page_ext.h>
13
14 extern struct page_ext_operations page_alloc_tagging_ops;
15 extern unsigned long alloc_tag_ref_mask;
16 extern int alloc_tag_ref_offs;
17 extern struct alloc_tag_kernel_section kernel_tags;
18
19 DECLARE_STATIC_KEY_FALSE(mem_profiling_compressed);
20
21 typedef u16 pgalloc_tag_idx;
22
23 union pgtag_ref_handle {
24 union codetag_ref *ref; /* reference in page extension */
25 struct page *page; /* reference in page flags */
26 };
27
28 /* Reserved indexes */
29 #define CODETAG_ID_NULL 0
30 #define CODETAG_ID_EMPTY 1
31 #define CODETAG_ID_FIRST 2
32
33 #ifdef CONFIG_MODULES
34
35 extern struct alloc_tag_module_section module_tags;
36
/*
 * Map a compressed page tag index (already rebased past the reserved IDs)
 * to its entry in the module tags section. Indexes below kernel_tags.count
 * belong to the kernel section and must not reach this helper.
 */
static inline struct alloc_tag *module_idx_to_tag(pgalloc_tag_idx idx)
{
	pgalloc_tag_idx module_offset = idx - kernel_tags.count;

	return &module_tags.first_tag[module_offset];
}
41
module_tag_to_idx(struct alloc_tag * tag)42 static inline pgalloc_tag_idx module_tag_to_idx(struct alloc_tag *tag)
43 {
44 return CODETAG_ID_FIRST + kernel_tags.count + (tag - module_tags.first_tag);
45 }
46
47 #else /* CONFIG_MODULES */
48
/*
 * !CONFIG_MODULES: no module tag section exists, so any index that falls
 * past the kernel section is corrupt. Warn and return NULL so the caller's
 * ref decodes as an untagged page rather than a wild pointer.
 */
static inline struct alloc_tag *module_idx_to_tag(pgalloc_tag_idx idx)
{
	pr_warn("invalid page tag reference %lu\n", (unsigned long)idx);
	return NULL;
}
54
/*
 * !CONFIG_MODULES: a tag outside the kernel section cannot exist. Warn and
 * fall back to CODETAG_ID_NULL (the "no tag" encoding) so the stored page
 * flags stay well-formed.
 */
static inline pgalloc_tag_idx module_tag_to_idx(struct alloc_tag *tag)
{
	pr_warn("invalid page tag 0x%lx\n", (unsigned long)tag);
	return CODETAG_ID_NULL;
}
60
61 #endif /* CONFIG_MODULES */
62
/*
 * Decode a compressed tag index (as stored in page->flags) into a codetag
 * reference. Inverse of ref_to_idx().
 */
static inline void idx_to_ref(pgalloc_tag_idx idx, union codetag_ref *ref)
{
	switch (idx) {
	case (CODETAG_ID_NULL):
		/* Page was never tagged */
		ref->ct = NULL;
		break;
	case (CODETAG_ID_EMPTY):
		/* Page was tagged, then deliberately marked as having no tag */
		set_codetag_empty(ref);
		break;
	default:
		/* Rebase past the reserved IDs, then pick kernel vs module section */
		idx -= CODETAG_ID_FIRST;
		ref->ct = idx < kernel_tags.count ?
			&kernel_tags.first_tag[idx].ct :
			&module_idx_to_tag(idx)->ct;
		break;
	}
}
80
/*
 * Encode a codetag reference as a compressed tag index suitable for storing
 * in page->flags. Inverse of idx_to_ref().
 */
static inline pgalloc_tag_idx ref_to_idx(union codetag_ref *ref)
{
	struct alloc_tag *tag;

	if (!ref->ct)
		return CODETAG_ID_NULL;

	if (is_codetag_empty(ref))
		return CODETAG_ID_EMPTY;

	tag = ct_to_alloc_tag(ref->ct);
	/* Tags inside the contiguous kernel section encode as their offset */
	if (tag >= kernel_tags.first_tag && tag < kernel_tags.first_tag + kernel_tags.count)
		return CODETAG_ID_FIRST + (tag - kernel_tags.first_tag);

	/* Anything else must come from the module section (warns if !CONFIG_MODULES) */
	return module_tag_to_idx(tag);
}
97
98
99
100 /* Should be called only if mem_alloc_profiling_enabled() */
/*
 * Look up the codetag reference for @page, copying it into @ref and
 * recording in @handle where it lives so it can later be written back
 * (update_page_tag_ref) and released (put_page_tag_ref).
 *
 * Returns false if @page is NULL or no page extension is available.
 * Should be called only if mem_alloc_profiling_enabled().
 */
static inline bool get_page_tag_ref(struct page *page, union codetag_ref *ref,
				    union pgtag_ref_handle *handle)
{
	if (!page)
		return false;

	if (static_key_enabled(&mem_profiling_compressed)) {
		pgalloc_tag_idx idx;

		/* Compressed mode: the tag index is packed into page->flags */
		idx = (page->flags >> alloc_tag_ref_offs) & alloc_tag_ref_mask;
		idx_to_ref(idx, ref);
		handle->page = page;
	} else {
		struct page_ext *page_ext;
		union codetag_ref *tmp;

		/*
		 * Uncompressed mode: the reference lives in the page
		 * extension. page_ext_get() takes a reference that the
		 * caller must drop via put_page_tag_ref().
		 */
		page_ext = page_ext_get(page);
		if (!page_ext)
			return false;

		tmp = (union codetag_ref *)page_ext_data(page_ext, &page_alloc_tagging_ops);
		ref->ct = tmp->ct;
		handle->ref = tmp;
	}

	return true;
}
128
/*
 * Release a handle obtained from get_page_tag_ref(). Only the uncompressed
 * (page_ext) mode holds a reference that needs dropping; compressed mode
 * stored a plain page pointer and needs no release.
 */
static inline void put_page_tag_ref(union pgtag_ref_handle handle)
{
	/* Checks whichever union member was set; both are pointers */
	if (WARN_ON(!handle.ref))
		return;

	if (!static_key_enabled(&mem_profiling_compressed))
		/* Recover the page_ext pointer from the embedded data pointer */
		page_ext_put((void *)handle.ref - page_alloc_tagging_ops.offset);
}
137
/*
 * Write @ref back to the location described by @handle. In compressed mode
 * the index is re-packed into page->flags with a lock-free cmpxchg loop so
 * concurrent updates of unrelated flag bits are not lost; in uncompressed
 * mode the page_ext slot is stored directly.
 */
static inline void update_page_tag_ref(union pgtag_ref_handle handle, union codetag_ref *ref)
{
	if (static_key_enabled(&mem_profiling_compressed)) {
		struct page *page = handle.page;
		unsigned long old_flags;
		unsigned long flags;
		unsigned long idx;

		if (WARN_ON(!page || !ref))
			return;

		idx = (unsigned long)ref_to_idx(ref);
		idx = (idx & alloc_tag_ref_mask) << alloc_tag_ref_offs;
		/* Retry until the tag bits are swapped in without clobbering other flags */
		do {
			old_flags = READ_ONCE(page->flags);
			flags = old_flags;
			flags &= ~(alloc_tag_ref_mask << alloc_tag_ref_offs);
			flags |= idx;
		} while (unlikely(!try_cmpxchg(&page->flags, &old_flags, flags)));
	} else {
		if (WARN_ON(!handle.ref || !ref))
			return;

		handle.ref->ct = ref->ct;
	}
}
164
clear_page_tag_ref(struct page * page)165 static inline void clear_page_tag_ref(struct page *page)
166 {
167 if (mem_alloc_profiling_enabled()) {
168 union pgtag_ref_handle handle;
169 union codetag_ref ref;
170
171 if (get_page_tag_ref(page, &ref, &handle)) {
172 set_codetag_empty(&ref);
173 update_page_tag_ref(handle, &ref);
174 put_page_tag_ref(handle);
175 }
176 }
177 }
178
pgalloc_tag_add(struct page * page,struct task_struct * task,unsigned int nr)179 static inline void pgalloc_tag_add(struct page *page, struct task_struct *task,
180 unsigned int nr)
181 {
182 if (mem_alloc_profiling_enabled()) {
183 union pgtag_ref_handle handle;
184 union codetag_ref ref;
185
186 if (get_page_tag_ref(page, &ref, &handle)) {
187 alloc_tag_add(&ref, task->alloc_tag, PAGE_SIZE * nr);
188 update_page_tag_ref(handle, &ref);
189 put_page_tag_ref(handle);
190 }
191 }
192 }
193
pgalloc_tag_sub(struct page * page,unsigned int nr)194 static inline void pgalloc_tag_sub(struct page *page, unsigned int nr)
195 {
196 if (mem_alloc_profiling_enabled()) {
197 union pgtag_ref_handle handle;
198 union codetag_ref ref;
199
200 if (get_page_tag_ref(page, &ref, &handle)) {
201 alloc_tag_sub(&ref, PAGE_SIZE * nr);
202 update_page_tag_ref(handle, &ref);
203 put_page_tag_ref(handle);
204 }
205 }
206 }
207
pgalloc_tag_get(struct page * page)208 static inline struct alloc_tag *pgalloc_tag_get(struct page *page)
209 {
210 struct alloc_tag *tag = NULL;
211
212 if (mem_alloc_profiling_enabled()) {
213 union pgtag_ref_handle handle;
214 union codetag_ref ref;
215
216 if (get_page_tag_ref(page, &ref, &handle)) {
217 alloc_tag_sub_check(&ref);
218 if (ref.ct)
219 tag = ct_to_alloc_tag(ref.ct);
220 put_page_tag_ref(handle);
221 }
222 }
223
224 return tag;
225 }
226
pgalloc_tag_sub_pages(struct alloc_tag * tag,unsigned int nr)227 static inline void pgalloc_tag_sub_pages(struct alloc_tag *tag, unsigned int nr)
228 {
229 if (mem_alloc_profiling_enabled() && tag)
230 this_cpu_sub(tag->counters->bytes, PAGE_SIZE * nr);
231 }
232
233 void pgalloc_tag_split(struct folio *folio, int old_order, int new_order);
234 void pgalloc_tag_swap(struct folio *new, struct folio *old);
235
236 void __init alloc_tag_sec_init(void);
237
238 #else /* CONFIG_MEM_ALLOC_PROFILING */
239
/* Stub implementations: with profiling compiled out, every tagging
 * operation is an empty inline so call sites compile away to nothing. */
static inline void clear_page_tag_ref(struct page *page) {}
static inline void pgalloc_tag_add(struct page *page, struct task_struct *task,
				   unsigned int nr) {}
static inline void pgalloc_tag_sub(struct page *page, unsigned int nr) {}
static inline struct alloc_tag *pgalloc_tag_get(struct page *page) { return NULL; }
static inline void pgalloc_tag_sub_pages(struct alloc_tag *tag, unsigned int nr) {}
static inline void alloc_tag_sec_init(void) {}
static inline void pgalloc_tag_split(struct folio *folio, int old_order, int new_order) {}
static inline void pgalloc_tag_swap(struct folio *new, struct folio *old) {}
249
250 #endif /* CONFIG_MEM_ALLOC_PROFILING */
251
252 #endif /* _LINUX_PGALLOC_TAG_H */
253