xref: /linux/kernel/trace/trace_stat.c (revision e8744fbc83188693f3590020b14d50df3387fc5a)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Infrastructure for statistic tracing (histogram output).
4  *
5  * Copyright (C) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
6  *
7  * Based on the code from trace_branch.c which is
8  * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
9  *
10  */
11 
12 #include <linux/security.h>
13 #include <linux/list.h>
14 #include <linux/slab.h>
15 #include <linux/rbtree.h>
16 #include <linux/tracefs.h>
17 #include "trace_stat.h"
18 #include "trace.h"
19 
20 
/*
 * One red-black tree node wrapping a single stat entry from a tracer.
 * The tree keeps the entries sorted so the seq_file output can walk
 * them in order; see insert_stat() for the sort direction.
 */
struct stat_node {
	struct rb_node		node;	/* linked into stat_session::stat_root */
	void			*stat;	/* opaque per-entry data owned by the tracer */
};
30 
/* A stat session is the stats output in one file */
struct stat_session {
	struct list_head	session_list;	/* link in all_stat_sessions */
	struct tracer_stat	*ts;		/* tracer callbacks for this session */
	struct rb_root		stat_root;	/* sorted stat entries, rebuilt on open */
	struct mutex		stat_mutex;	/* protects stat_root */
	struct dentry		*file;		/* tracefs file exposing the stats */
};
39 
/* All of the sessions currently in use. Each stat file embeds one session */
static LIST_HEAD(all_stat_sessions);
static DEFINE_MUTEX(all_stat_sessions_mutex);	/* protects all_stat_sessions */

/* The root directory for all stat files; created lazily by tracing_stat_init() */
static struct dentry		*stat_dir;
46 
__reset_stat_session(struct stat_session * session)47 static void __reset_stat_session(struct stat_session *session)
48 {
49 	struct stat_node *snode, *n;
50 
51 	rbtree_postorder_for_each_entry_safe(snode, n, &session->stat_root, node) {
52 		if (session->ts->stat_release)
53 			session->ts->stat_release(snode->stat);
54 		kfree(snode);
55 	}
56 
57 	session->stat_root = RB_ROOT;
58 }
59 
reset_stat_session(struct stat_session * session)60 static void reset_stat_session(struct stat_session *session)
61 {
62 	mutex_lock(&session->stat_mutex);
63 	__reset_stat_session(session);
64 	mutex_unlock(&session->stat_mutex);
65 }
66 
/*
 * Tear down a session: remove its tracefs file first (so no new
 * readers can open it), then free the rbtree and the session itself.
 * Callers own the session exclusively (fresh allocation on the
 * register error path, or already unlinked from all_stat_sessions).
 */
static void destroy_session(struct stat_session *session)
{
	tracefs_remove(session->file);
	__reset_stat_session(session);
	mutex_destroy(&session->stat_mutex);
	kfree(session);
}
74 
/*
 * Wrap @stat in a new stat_node and insert it into the rbtree at
 * @root, keeping the tree sorted in descending order according to
 * @cmp.  Returns 0 on success or -ENOMEM.
 */
static int insert_stat(struct rb_root *root, void *stat, cmp_func_t cmp)
{
	struct rb_node **link = &root->rb_node;
	struct rb_node *parent = NULL;
	struct stat_node *new_node;

	new_node = kzalloc(sizeof(*new_node), GFP_KERNEL);
	if (!new_node)
		return -ENOMEM;
	new_node->stat = stat;

	/* Walk down to the leaf position for the new entry (descending sort). */
	while (*link) {
		struct stat_node *entry;

		entry = container_of(*link, struct stat_node, node);
		parent = *link;

		if (cmp(new_node->stat, entry->stat) >= 0)
			link = &parent->rb_left;
		else
			link = &parent->rb_right;
	}

	rb_link_node(&new_node->node, parent, link);
	rb_insert_color(&new_node->node, root);
	return 0;
}
107 
/*
 * For tracers that don't provide a stat_cmp callback.
 * Always reporting "smaller" forces each new entry to be inserted as
 * the right-most node in the rbtree, preserving insertion order.
 */
static int dummy_cmp(const void *p1, const void *p2)
{
	return -1;
}
117 
/*
 * Initialize the stat rbtree at each trace_stat file opening.
 * All of these copies and sorting are required on all opening
 * since the stats could have changed between two file sessions.
 *
 * Returns 0 on success (including a tracer with no entries), or a
 * negative errno from insert_stat() on allocation failure.
 */
static int stat_seq_init(struct stat_session *session)
{
	struct tracer_stat *ts = session->ts;
	struct rb_root *root = &session->stat_root;
	void *stat;
	int ret = 0;
	int i;

	/* Held for the whole rebuild; released automatically at scope exit. */
	guard(mutex)(&session->stat_mutex);
	__reset_stat_session(session);

	/* stat_cmp is optional; the default keeps insertion order. */
	if (!ts->stat_cmp)
		ts->stat_cmp = dummy_cmp;

	stat = ts->stat_start(ts);
	if (!stat)
		return 0;

	ret = insert_stat(root, stat, ts->stat_cmp);
	if (ret)
		return ret;

	/*
	 * Iterate over the tracer stat entries and store them in an rbtree.
	 */
	for (i = 1; ; i++) {
		stat = ts->stat_next(stat, i);

		/* End of insertion */
		if (!stat)
			break;

		ret = insert_stat(root, stat, ts->stat_cmp);
		if (ret)
			goto exit_free_rbtree;
	}

	return ret;

exit_free_rbtree:
	/* A partially-built tree is useless; free what was inserted so far. */
	__reset_stat_session(session);
	return ret;
}
166 
167 
/*
 * Start a seq_file walk at position *pos.
 *
 * Position 0 yields SEQ_START_TOKEN when the tracer provides a header
 * callback; later positions index into the sorted rbtree.  The session
 * mutex is taken here and released in stat_seq_stop(), so the tree
 * cannot change during the iteration.
 *
 * Use loff_t for the cursor rather than truncating *pos to int: a very
 * large seek offset would wrap the int and mis-position (or never
 * terminate correctly) the walk below.
 */
static void *stat_seq_start(struct seq_file *s, loff_t *pos)
{
	struct stat_session *session = s->private;
	struct rb_node *node;
	loff_t n = *pos;
	loff_t i;

	/* Prevent from tracer switch or rbtree modification */
	mutex_lock(&session->stat_mutex);

	/* If we are in the beginning of the file, print the headers */
	if (session->ts->stat_headers) {
		if (n == 0)
			return SEQ_START_TOKEN;
		n--;
	}

	node = rb_first(&session->stat_root);
	for (i = 0; node && i < n; i++)
		node = rb_next(node);

	return node;
}
191 
/*
 * Advance the cursor: after the header token comes the first rbtree
 * entry, then in-order successors until rb_next() returns NULL.
 */
static void *stat_seq_next(struct seq_file *s, void *p, loff_t *pos)
{
	struct stat_session *session = s->private;

	++*pos;

	return (p == SEQ_START_TOKEN) ?
		rb_first(&session->stat_root) : rb_next(p);
}
204 
/* Release the mutex taken in stat_seq_start(). */
static void stat_seq_stop(struct seq_file *s, void *p)
{
	struct stat_session *session = s->private;
	mutex_unlock(&session->stat_mutex);
}
210 
stat_seq_show(struct seq_file * s,void * v)211 static int stat_seq_show(struct seq_file *s, void *v)
212 {
213 	struct stat_session *session = s->private;
214 	struct stat_node *l = container_of(v, struct stat_node, node);
215 
216 	if (v == SEQ_START_TOKEN)
217 		return session->ts->stat_headers(s);
218 
219 	return session->ts->stat_show(s, l->stat);
220 }
221 
/* seq_file iterator over a session's sorted stat entries. */
static const struct seq_operations trace_stat_seq_ops = {
	.start		= stat_seq_start,
	.next		= stat_seq_next,
	.stop		= stat_seq_stop,
	.show		= stat_seq_show
};
228 
/* The session stat is refilled and resorted at each stat file opening */
static int tracing_stat_open(struct inode *inode, struct file *file)
{
	int ret;
	struct seq_file *m;
	struct stat_session *session = inode->i_private;

	/* Honor kernel lockdown: refuse tracefs access when locked down. */
	ret = security_locked_down(LOCKDOWN_TRACEFS);
	if (ret)
		return ret;

	/* Snapshot and sort the tracer's current stats into the rbtree. */
	ret = stat_seq_init(session);
	if (ret)
		return ret;

	ret = seq_open(file, &trace_stat_seq_ops);
	if (ret) {
		/* seq_open failed: don't keep the freshly-built tree around. */
		reset_stat_session(session);
		return ret;
	}

	m = file->private_data;
	m->private = session;
	return ret;
}
254 
255 /*
256  * Avoid consuming memory with our now useless rbtree.
257  */
tracing_stat_release(struct inode * i,struct file * f)258 static int tracing_stat_release(struct inode *i, struct file *f)
259 {
260 	struct stat_session *session = i->i_private;
261 
262 	reset_stat_session(session);
263 
264 	return seq_release(i, f);
265 }
266 
/* File operations for each trace_stat/<tracer> file. */
static const struct file_operations tracing_stat_fops = {
	.open		= tracing_stat_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= tracing_stat_release
};
273 
tracing_stat_init(void)274 static int tracing_stat_init(void)
275 {
276 	int ret;
277 
278 	ret = tracing_init_dentry();
279 	if (ret)
280 		return -ENODEV;
281 
282 	stat_dir = tracefs_create_dir("trace_stat", NULL);
283 	if (!stat_dir) {
284 		pr_warn("Could not create tracefs 'trace_stat' entry\n");
285 		return -ENOMEM;
286 	}
287 	return 0;
288 }
289 
init_stat_file(struct stat_session * session)290 static int init_stat_file(struct stat_session *session)
291 {
292 	int ret;
293 
294 	if (!stat_dir && (ret = tracing_stat_init()))
295 		return ret;
296 
297 	session->file = tracefs_create_file(session->ts->name, TRACE_MODE_WRITE,
298 					    stat_dir, session,
299 					    &tracing_stat_fops);
300 	if (!session->file)
301 		return -ENOMEM;
302 	return 0;
303 }
304 
/*
 * Register @trace and expose its stats as a file under trace_stat/ in
 * tracefs.
 *
 * Returns 0 on success; -EINVAL if @trace is NULL, lacks one of the
 * mandatory callbacks, or is already registered; -ENOMEM on allocation
 * failure; or the error from creating the tracefs file.
 */
int register_stat_tracer(struct tracer_stat *trace)
{
	struct stat_session *session, *node;
	int ret;

	if (!trace)
		return -EINVAL;

	/* stat_start, stat_next and stat_show are mandatory; the rest optional. */
	if (!trace->stat_start || !trace->stat_next || !trace->stat_show)
		return -EINVAL;

	guard(mutex)(&all_stat_sessions_mutex);

	/* Already registered? */
	list_for_each_entry(node, &all_stat_sessions, session_list) {
		if (node->ts == trace)
			return -EINVAL;
	}

	/* Init the session */
	session = kzalloc(sizeof(*session), GFP_KERNEL);
	if (!session)
		return -ENOMEM;

	session->ts = trace;
	INIT_LIST_HEAD(&session->session_list);
	mutex_init(&session->stat_mutex);

	ret = init_stat_file(session);
	if (ret) {
		destroy_session(session);
		return ret;
	}

	/* Register */
	list_add_tail(&session->session_list, &all_stat_sessions);

	return 0;
}
344 
unregister_stat_tracer(struct tracer_stat * trace)345 void unregister_stat_tracer(struct tracer_stat *trace)
346 {
347 	struct stat_session *node, *tmp;
348 
349 	mutex_lock(&all_stat_sessions_mutex);
350 	list_for_each_entry_safe(node, tmp, &all_stat_sessions, session_list) {
351 		if (node->ts == trace) {
352 			list_del(&node->session_list);
353 			destroy_session(node);
354 			break;
355 		}
356 	}
357 	mutex_unlock(&all_stat_sessions_mutex);
358 }
359