xref: /linux/lib/alloc_tag.c (revision c8faf11cd192214e231626c3ee973a35d8fc33f2)
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/alloc_tag.h>
#include <linux/fs.h>
#include <linux/gfp.h>
#include <linux/module.h>
#include <linux/page_ext.h>
#include <linux/proc_fs.h>
#include <linux/seq_buf.h>
#include <linux/seq_file.h>

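/* Codetag type registered for the "alloc_tags" section; set in alloc_tag_init(). */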
static struct codetag_type *alloc_tag_cttype;

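/*
 * Shared per-CPU counters. See DEFINE_ALLOC_TAG() in <linux/alloc_tag.h> for
 * when tags fall back to accounting into this single counter set.
 */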
DEFINE_PER_CPU(struct alloc_tag_counters, _shared_alloc_tag);
EXPORT_SYMBOL(_shared_alloc_tag);

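/*
 * Static key gating the allocation-profiling fast path. Its initial state
 * follows CONFIG_MEM_ALLOC_PROFILING_ENABLED_BY_DEFAULT and it can be
 * flipped via the early parameter or the vm.mem_profiling sysctl below.
 */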
DEFINE_STATIC_KEY_MAYBE(CONFIG_MEM_ALLOC_PROFILING_ENABLED_BY_DEFAULT,
			mem_alloc_profiling_key);

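/* Per-open iterator state for the /proc/allocinfo seq_file interface. */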
struct allocinfo_private {
	struct codetag_iterator iter;
	bool print_header;
};

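/*
 * seq_file ->start: allocate the private iterator, take the codetag module
 * list lock and advance to the entry at *pos. Returns NULL when the position
 * is past the last tag (or on allocation failure).
 */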
static void *allocinfo_start(struct seq_file *m, loff_t *pos)
{
	struct allocinfo_private *priv;
	struct codetag *ct;
	loff_t node = *pos;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	m->private = priv;
	if (!priv)
		return NULL;

	priv->print_header = (node == 0);
	codetag_lock_module_list(alloc_tag_cttype, true);
	priv->iter = codetag_get_ct_iter(alloc_tag_cttype);
	while ((ct = codetag_next_ct(&priv->iter)) != NULL && node)
		node--;

	return ct ? priv : NULL;
}

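/* seq_file ->next: step the codetag iterator to the next tag, if any. */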
static void *allocinfo_next(struct seq_file *m, void *arg, loff_t *pos)
{
	struct allocinfo_private *priv = (struct allocinfo_private *)arg;
	struct codetag *ct = codetag_next_ct(&priv->iter);

	(*pos)++;
	if (!ct)
		return NULL;

	return priv;
}

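/* seq_file ->stop: drop the module list lock and free the private iterator. */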
static void allocinfo_stop(struct seq_file *m, void *arg)
{
	struct allocinfo_private *priv = (struct allocinfo_private *)m->private;

	if (priv) {
		codetag_lock_module_list(alloc_tag_cttype, false);
		kfree(priv);
	}
}

static void print_allocinfo_header(struct seq_buf *buf)
{
	/* Output format version, so we can change it. */
	seq_buf_printf(buf, "allocinfo - version: 1.0\n");
	seq_buf_printf(buf, "#     <size>  <calls> <tag info>\n");
}

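/* Format one tag as a line: current bytes, call count and the tag's source location. */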
static void alloc_tag_to_text(struct seq_buf *out, struct codetag *ct)
{
	struct alloc_tag *tag = ct_to_alloc_tag(ct);
	struct alloc_tag_counters counter = alloc_tag_read(tag);
	s64 bytes = counter.bytes;

	seq_buf_printf(out, "%12lli %8llu ", bytes, counter.calls);
	codetag_to_text(out, ct);
	seq_buf_putc(out, ' ');
	seq_buf_putc(out, '\n');
}

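/*
 * seq_file ->show: emit the header before the first entry, then one line per
 * tag, writing directly into the seq_file buffer through a seq_buf.
 */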
static int allocinfo_show(struct seq_file *m, void *arg)
{
	struct allocinfo_private *priv = (struct allocinfo_private *)arg;
	char *bufp;
	size_t n = seq_get_buf(m, &bufp);
	struct seq_buf buf;

	seq_buf_init(&buf, bufp, n);
	if (priv->print_header) {
		print_allocinfo_header(&buf);
		priv->print_header = false;
	}
	alloc_tag_to_text(&buf, priv->iter.ct);
	seq_commit(m, seq_buf_used(&buf));
	return 0;
}

static const struct seq_operations allocinfo_seq_op = {
	.start	= allocinfo_start,
	.next	= allocinfo_next,
	.stop	= allocinfo_stop,
	.show	= allocinfo_show,
};

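/*
 * Fill @tags with up to @count tags holding the most allocated bytes, sorted
 * in descending order (insertion sort into the array). When @can_sleep is
 * false the module list lock is only trylocked and 0 is returned if it cannot
 * be taken. Returns the number of entries written.
 */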
size_t alloc_tag_top_users(struct codetag_bytes *tags, size_t count, bool can_sleep)
{
	struct codetag_iterator iter;
	struct codetag *ct;
	struct codetag_bytes n;
	unsigned int i, nr = 0;

	if (can_sleep)
		codetag_lock_module_list(alloc_tag_cttype, true);
	else if (!codetag_trylock_module_list(alloc_tag_cttype))
		return 0;

	iter = codetag_get_ct_iter(alloc_tag_cttype);
	while ((ct = codetag_next_ct(&iter))) {
		struct alloc_tag_counters counter = alloc_tag_read(ct_to_alloc_tag(ct));

		n.ct	= ct;
		n.bytes = counter.bytes;

		for (i = 0; i < nr; i++)
			if (n.bytes > tags[i].bytes)
				break;

		if (i < count) {
			nr -= nr == count;
			memmove(&tags[i + 1],
				&tags[i],
				sizeof(tags[0]) * (nr - i));
			nr++;
			tags[i] = n;
		}
	}

	codetag_lock_module_list(alloc_tag_cttype, false);

	return nr;
}

static void __init procfs_init(void)
{
	proc_create_seq("allocinfo", 0400, NULL, &allocinfo_seq_op);
}

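/*
 * Module unload callback: warn about any tag in @cmod that still has bytes
 * accounted to it. Returns true only if the module has no live allocations,
 * so the caller can decide whether the tag memory may be released.
 */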
static bool alloc_tag_module_unload(struct codetag_type *cttype,
				    struct codetag_module *cmod)
{
	struct codetag_iterator iter = codetag_get_ct_iter(cttype);
	struct alloc_tag_counters counter;
	bool module_unused = true;
	struct alloc_tag *tag;
	struct codetag *ct;

	for (ct = codetag_next_ct(&iter); ct; ct = codetag_next_ct(&iter)) {
		if (iter.cmod != cmod)
			continue;

		tag = ct_to_alloc_tag(ct);
		counter = alloc_tag_read(tag);

		if (WARN(counter.bytes,
			 "%s:%u module %s func:%s has %llu allocated at module unload",
			 ct->filename, ct->lineno, ct->modname, ct->function, counter.bytes))
			module_unused = false;
	}

	return module_unused;
}

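/*
 * Whether memory allocation profiling can be enabled at all; cleared when the
 * boot parameter selects "never", which also makes the sysctl read-only.
 */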
#ifdef CONFIG_MEM_ALLOC_PROFILING_ENABLED_BY_DEFAULT
static bool mem_profiling_support __meminitdata = true;
#else
static bool mem_profiling_support __meminitdata;
#endif

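/*
 * Early parameter: "sysctl.vm.mem_profiling=never" disables profiling and
 * forbids enabling it later; any other boolean value just sets the initial
 * state of the static key.
 */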
static int __init setup_early_mem_profiling(char *str)
{
	bool enable;

	if (!str || !str[0])
		return -EINVAL;

	if (!strncmp(str, "never", 5)) {
		enable = false;
		mem_profiling_support = false;
	} else {
		int res;

		res = kstrtobool(str, &enable);
		if (res)
			return res;

		mem_profiling_support = true;
	}

	if (enable != static_key_enabled(&mem_alloc_profiling_key)) {
		if (enable)
			static_branch_enable(&mem_alloc_profiling_key);
		else
			static_branch_disable(&mem_alloc_profiling_key);
	}

	return 0;
}
early_param("sysctl.vm.mem_profiling", setup_early_mem_profiling);

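/*
 * page_ext hook reserving one union codetag_ref per page so page allocations
 * can be attributed to a tag; only needed when profiling is supported.
 */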
static __init bool need_page_alloc_tagging(void)
{
	return mem_profiling_support;
}

static __init void init_page_alloc_tagging(void)
{
}

struct page_ext_operations page_alloc_tagging_ops = {
	.size = sizeof(union codetag_ref),
	.need = need_page_alloc_tagging,
	.init = init_page_alloc_tagging,
};
EXPORT_SYMBOL(page_alloc_tagging_ops);

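/*
 * /proc/sys/vm/mem_profiling toggles the static key at runtime; it is
 * read-only when CONFIG_MEM_ALLOC_PROFILING_DEBUG is set or when profiling
 * support was disabled on the command line.
 */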
#ifdef CONFIG_SYSCTL
static struct ctl_table memory_allocation_profiling_sysctls[] = {
	{
		.procname	= "mem_profiling",
		.data		= &mem_alloc_profiling_key,
#ifdef CONFIG_MEM_ALLOC_PROFILING_DEBUG
		.mode		= 0444,
#else
		.mode		= 0644,
#endif
		.proc_handler	= proc_do_static_key,
	},
};

static void __init sysctl_init(void)
{
	if (!mem_profiling_support)
		memory_allocation_profiling_sysctls[0].mode = 0444;

	register_sysctl_init("vm", memory_allocation_profiling_sysctls);
}
#else /* CONFIG_SYSCTL */
static inline void sysctl_init(void) {}
#endif /* CONFIG_SYSCTL */

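/*
 * Register the alloc_tag codetag type and create the sysctl and
 * /proc/allocinfo interfaces.
 */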
static int __init alloc_tag_init(void)
{
	const struct codetag_type_desc desc = {
		.section	= "alloc_tags",
		.tag_size	= sizeof(struct alloc_tag),
		.module_unload	= alloc_tag_module_unload,
	};

	alloc_tag_cttype = codetag_register_type(&desc);
	if (IS_ERR(alloc_tag_cttype))
		return PTR_ERR(alloc_tag_cttype);

	sysctl_init();
	procfs_init();

	return 0;
}
module_init(alloc_tag_init);