/* SPDX-License-Identifier: GPL-2.0 */
/*
 * allocation tagging
 */
#ifndef _LINUX_ALLOC_TAG_H
#define _LINUX_ALLOC_TAG_H

#include <linux/bug.h>
#include <linux/codetag.h>
#include <linux/container_of.h>
#include <linux/preempt.h>
#include <asm/percpu.h>
#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/static_key.h>
#include <linux/irqflags.h>

struct alloc_tag_counters {
	u64 bytes;
	u64 calls;
};

/*
 * An instance of this structure is created in a special ELF section at every
 * allocation callsite. At runtime, the special section is treated as an array
 * of these. The embedded struct codetag ties each instance into the codetag
 * framework.
 */
struct alloc_tag {
	struct codetag			ct;
	struct alloc_tag_counters __percpu	*counters;
} __aligned(8);
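
/*
 * Illustrative note (not an interface defined here): every callsite that goes
 * through the alloc_hooks() wrappers below gets exactly one such tag in the
 * "alloc_tags" section, and readers such as /proc/allocinfo sum the per-CPU
 * counters to report bytes and calls per callsite.
 */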

struct alloc_tag_kernel_section {
	struct alloc_tag *first_tag;
	unsigned long count;
};

struct alloc_tag_module_section {
	union {
		unsigned long start_addr;
		struct alloc_tag *first_tag;
	};
	unsigned long end_addr;
	/* used size */
	unsigned long size;
};

#ifdef CONFIG_MEM_ALLOC_PROFILING_DEBUG

#define CODETAG_EMPTY	((void *)1)

static inline bool is_codetag_empty(union codetag_ref *ref)
{
	return ref->ct == CODETAG_EMPTY;
}

static inline void set_codetag_empty(union codetag_ref *ref)
{
	if (ref)
		ref->ct = CODETAG_EMPTY;
}

#else /* CONFIG_MEM_ALLOC_PROFILING_DEBUG */

static inline bool is_codetag_empty(union codetag_ref *ref) { return false; }

static inline void set_codetag_empty(union codetag_ref *ref)
{
	if (ref)
		ref->ct = NULL;
}

#endif /* CONFIG_MEM_ALLOC_PROFILING_DEBUG */

#ifdef CONFIG_MEM_ALLOC_PROFILING

#define ALLOC_TAG_SECTION_NAME	"alloc_tags"

struct codetag_bytes {
	struct codetag *ct;
	s64 bytes;
};

size_t alloc_tag_top_users(struct codetag_bytes *tags, size_t count, bool can_sleep);

static inline struct alloc_tag *ct_to_alloc_tag(struct codetag *ct)
{
	return container_of(ct, struct alloc_tag, ct);
}

#ifdef ARCH_NEEDS_WEAK_PER_CPU
/*
 * When percpu variables are required to be defined as weak, static percpu
 * variables can't be used inside a function (see comments for DECLARE_PER_CPU_SECTION).
 * Instead we will account all module allocations to a single counter.
 */
DECLARE_PER_CPU(struct alloc_tag_counters, _shared_alloc_tag);

#define DEFINE_ALLOC_TAG(_alloc_tag)						\
	static struct alloc_tag _alloc_tag __used __aligned(8)			\
	__section(ALLOC_TAG_SECTION_NAME) = {					\
		.ct = CODE_TAG_INIT,						\
		.counters = &_shared_alloc_tag };

#else /* ARCH_NEEDS_WEAK_PER_CPU */

#ifdef MODULE

#define DEFINE_ALLOC_TAG(_alloc_tag)						\
	static struct alloc_tag _alloc_tag __used __aligned(8)			\
	__section(ALLOC_TAG_SECTION_NAME) = {					\
		.ct = CODE_TAG_INIT,						\
		.counters = NULL };

#else  /* MODULE */

#define DEFINE_ALLOC_TAG(_alloc_tag)						\
	static DEFINE_PER_CPU(struct alloc_tag_counters, _alloc_tag_cntr);	\
	static struct alloc_tag _alloc_tag __used __aligned(8)			\
	__section(ALLOC_TAG_SECTION_NAME) = {					\
		.ct = CODE_TAG_INIT,						\
		.counters = &_alloc_tag_cntr };

#endif /* MODULE */

#endif /* ARCH_NEEDS_WEAK_PER_CPU */

DECLARE_STATIC_KEY_MAYBE(CONFIG_MEM_ALLOC_PROFILING_ENABLED_BY_DEFAULT,
			mem_alloc_profiling_key);

static inline bool mem_alloc_profiling_enabled(void)
{
	return static_branch_maybe(CONFIG_MEM_ALLOC_PROFILING_ENABLED_BY_DEFAULT,
				   &mem_alloc_profiling_key);
}

static inline struct alloc_tag_counters alloc_tag_read(struct alloc_tag *tag)
{
	struct alloc_tag_counters v = { 0, 0 };
	struct alloc_tag_counters *counter;
	int cpu;

	for_each_possible_cpu(cpu) {
		counter = per_cpu_ptr(tag->counters, cpu);
		v.bytes += counter->bytes;
		v.calls += counter->calls;
	}

	return v;
}
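
/*
 * Example (a hedged sketch of a consumer, not an interface defined here):
 * reporting code can snapshot a tag's totals with alloc_tag_read() and print
 * them, e.g.:
 *
 *	struct alloc_tag_counters ctr = alloc_tag_read(tag);
 *
 *	pr_info("%llu bytes in %llu calls\n",
 *		(unsigned long long)ctr.bytes,
 *		(unsigned long long)ctr.calls);
 *
 * The totals are only approximate while allocations are in flight, because
 * the per-CPU counters are summed without stopping concurrent updates.
 */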

#ifdef CONFIG_MEM_ALLOC_PROFILING_DEBUG
static inline void alloc_tag_add_check(union codetag_ref *ref, struct alloc_tag *tag)
{
	WARN_ONCE(ref && ref->ct && !is_codetag_empty(ref),
		  "alloc_tag was not cleared (got tag for %s:%u)\n",
		  ref->ct->filename, ref->ct->lineno);

	WARN_ONCE(!tag, "current->alloc_tag not set\n");
}

static inline void alloc_tag_sub_check(union codetag_ref *ref)
{
	WARN_ONCE(ref && !ref->ct, "alloc_tag was not set\n");
}
#else
static inline void alloc_tag_add_check(union codetag_ref *ref, struct alloc_tag *tag) {}
static inline void alloc_tag_sub_check(union codetag_ref *ref) {}
#endif

/* Caller should verify that both ref and tag are valid */
static inline bool __alloc_tag_ref_set(union codetag_ref *ref, struct alloc_tag *tag)
{
	alloc_tag_add_check(ref, tag);
	if (!ref || !tag)
		return false;

	ref->ct = &tag->ct;
	return true;
}

static inline bool alloc_tag_ref_set(union codetag_ref *ref, struct alloc_tag *tag)
{
	if (unlikely(!__alloc_tag_ref_set(ref, tag)))
		return false;

	/*
	 * We need to increment the call counter every time we have a new
	 * allocation or when we split a large allocation into smaller ones.
	 * Each new reference to a sub-allocation must bump the call counter
	 * because the counter is decremented when each part is freed.
	 */
	this_cpu_inc(tag->counters->calls);
	return true;
}

static inline void alloc_tag_add(union codetag_ref *ref, struct alloc_tag *tag, size_t bytes)
{
	if (likely(alloc_tag_ref_set(ref, tag)))
		this_cpu_add(tag->counters->bytes, bytes);
}

static inline void alloc_tag_sub(union codetag_ref *ref, size_t bytes)
{
	struct alloc_tag *tag;

	alloc_tag_sub_check(ref);
	if (!ref || !ref->ct)
		return;

	if (is_codetag_empty(ref)) {
		ref->ct = NULL;
		return;
	}

	tag = ct_to_alloc_tag(ref->ct);

	this_cpu_sub(tag->counters->bytes, bytes);
	this_cpu_dec(tag->counters->calls);

	ref->ct = NULL;
}
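
/*
 * Sketch of how the two halves pair up (the containing object and its ref
 * field below are assumptions for illustration; the real callers live in the
 * page and slab allocator hooks):
 *
 *	alloc_tag_add(&obj->ref, current->alloc_tag, size);	// at allocation
 *	...
 *	alloc_tag_sub(&obj->ref, size);				// at free
 *
 * Once every reference taken against a tag has been subtracted, its byte and
 * call counters return to their previous values.
 */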

#define alloc_tag_record(p)	((p) = current->alloc_tag)

#else /* CONFIG_MEM_ALLOC_PROFILING */

#define DEFINE_ALLOC_TAG(_alloc_tag)
static inline bool mem_alloc_profiling_enabled(void) { return false; }
static inline void alloc_tag_add(union codetag_ref *ref, struct alloc_tag *tag,
				 size_t bytes) {}
static inline void alloc_tag_sub(union codetag_ref *ref, size_t bytes) {}
#define alloc_tag_record(p)	do {} while (0)

#endif /* CONFIG_MEM_ALLOC_PROFILING */

#define alloc_hooks_tag(_tag, _do_alloc)				\
({									\
	typeof(_do_alloc) _res;						\
	if (mem_alloc_profiling_enabled()) {				\
		struct alloc_tag * __maybe_unused _old;			\
		_old = alloc_tag_save(_tag);				\
		_res = _do_alloc;					\
		alloc_tag_restore(_tag, _old);				\
	} else								\
		_res = _do_alloc;					\
	_res;								\
})

#define alloc_hooks(_do_alloc)						\
({									\
	DEFINE_ALLOC_TAG(_alloc_tag);					\
	alloc_hooks_tag(&_alloc_tag, _do_alloc);			\
})
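
/*
 * Usage sketch: an allocation API wraps its real implementation with
 * alloc_hooks() so that every callsite of the wrapper gets its own tag. The
 * my_alloc()/my_alloc_noprof() names below are hypothetical; kmalloc() in
 * slab.h is wrapped in the same way:
 *
 *	#define my_alloc(size, gfp)	alloc_hooks(my_alloc_noprof(size, gfp))
 *
 * Each expansion defines one struct alloc_tag in the "alloc_tags" section,
 * makes it current via alloc_tag_save()/alloc_tag_restore(), and the
 * allocator charges the allocation to current->alloc_tag.
 */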

#endif /* _LINUX_ALLOC_TAG_H */