/*
 * Infrastructure for statistic tracing (histogram output).
 *
 * Copyright (C) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
 *
 * Based on the code from trace_branch.c which is
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 *
 */


#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/debugfs.h>
#include "trace_stat.h"
#include "trace.h"


/*
 * One stat entry from a tracer, kept as a red-black tree node.
 * We use such a tree to quickly sort the stat entries
 * coming from the tracer.
 */
struct stat_node {
	struct rb_node		node;
	void			*stat;
};

/* A stat session is the stats output in one file */
struct stat_session {
	struct list_head	session_list;
	struct tracer_stat	*ts;
	struct rb_root		stat_root;
	struct mutex		stat_mutex;
	struct dentry		*file;
};

/* All of the sessions currently in use. Each stat file embeds one session */
static LIST_HEAD(all_stat_sessions);
static DEFINE_MUTEX(all_stat_sessions_mutex);

/* The root directory for all stat files */
static struct dentry		*stat_dir;

/*
 * Iterate through the rbtree using a post order traversal path
 * to release the next node.
 * It won't necessarily release one at each iteration,
 * but it will at least advance closer to the next one
 * to be released.
 */
static struct rb_node *release_next(struct rb_node *node)
{
	struct stat_node *snode;
	struct rb_node *parent = rb_parent(node);

	if (node->rb_left)
		return node->rb_left;
	else if (node->rb_right)
		return node->rb_right;
	else {
		/*
		 * We are a leaf: unlink ourself from the parent so it
		 * is not visited again, then free the node.
		 */
		if (!parent)
			;
		else if (parent->rb_left == node)
			parent->rb_left = NULL;
		else
			parent->rb_right = NULL;

		snode = container_of(node, struct stat_node, node);
		kfree(snode);

		return parent;
	}
}

static void __reset_stat_session(struct stat_session *session)
{
	struct rb_node *node = session->stat_root.rb_node;

	while (node)
		node = release_next(node);

	session->stat_root = RB_ROOT;
}

static void reset_stat_session(struct stat_session *session)
{
	mutex_lock(&session->stat_mutex);
	__reset_stat_session(session);
	mutex_unlock(&session->stat_mutex);
}

static void destroy_session(struct stat_session *session)
{
	debugfs_remove(session->file);
	__reset_stat_session(session);
	mutex_destroy(&session->stat_mutex);
	kfree(session);
}

typedef int (*cmp_stat_t)(void *, void *);

static int insert_stat(struct rb_root *root, void *stat, cmp_stat_t cmp)
{
	struct rb_node **new = &(root->rb_node), *parent = NULL;
	struct stat_node *data;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;
	data->stat = stat;

	/*
	 * Figure out where to put the new node: this is a descending
	 * sort, entries that compare >= 0 go to the left (output first).
	 */
	while (*new) {
		struct stat_node *this;
		int result;

		this = container_of(*new, struct stat_node, node);
		result = cmp(data->stat, this->stat);

		parent = *new;
		if (result >= 0)
			new = &((*new)->rb_left);
		else
			new = &((*new)->rb_right);
	}

	rb_link_node(&data->node, parent, new);
	rb_insert_color(&data->node, root);
	return 0;
}

/*
 * For tracers that don't provide a stat_cmp callback.
 * This one will force every insertion as the right-most
 * node in the rbtree.
 */
static int dummy_cmp(void *p1, void *p2)
{
	return -1;
}

/*
 * Initialize the stat rbtree at each trace_stat file opening.
 * All this copying and sorting is needed on every open,
 * since the stats could have changed between two file sessions.
 */
static int stat_seq_init(struct stat_session *session)
{
	struct tracer_stat *ts = session->ts;
	struct rb_root *root = &session->stat_root;
	void *stat;
	int ret = 0;
	int i;

	mutex_lock(&session->stat_mutex);
	__reset_stat_session(session);

	if (!ts->stat_cmp)
		ts->stat_cmp = dummy_cmp;

	stat = ts->stat_start(ts);
	if (!stat)
		goto exit;

	ret = insert_stat(root, stat, ts->stat_cmp);
	if (ret)
		goto exit;

	/*
	 * Iterate over the tracer stat entries and store them in an rbtree.
	 */
	for (i = 1; ; i++) {
		stat = ts->stat_next(stat, i);

		/* End of insertion */
		if (!stat)
			break;

		ret = insert_stat(root, stat, ts->stat_cmp);
		if (ret)
			goto exit_free_rbtree;
	}

exit:
	mutex_unlock(&session->stat_mutex);
	return ret;

exit_free_rbtree:
	__reset_stat_session(session);
	mutex_unlock(&session->stat_mutex);
	return ret;
}


static void *stat_seq_start(struct seq_file *s, loff_t *pos)
{
	struct stat_session *session = s->private;
	struct rb_node *node;
	int n = *pos;
	int i;

	/* Prevent a tracer switch or rbtree modification while we walk */
	mutex_lock(&session->stat_mutex);

	/*
	 * If we are at the beginning of the file, print the headers.
	 * The header token occupies position 0, so account for it
	 * before walking the rbtree, otherwise a seq_file restart
	 * would skip one entry.
	 */
	if (session->ts->stat_headers) {
		if (n == 0)
			return SEQ_START_TOKEN;
		n--;
	}

	node = rb_first(&session->stat_root);
	for (i = 0; node && i < n; i++)
		node = rb_next(node);

	return node;
}

static void *stat_seq_next(struct seq_file *s, void *p, loff_t *pos)
{
	struct stat_session *session = s->private;
	struct rb_node *node = p;

	(*pos)++;

	if (p == SEQ_START_TOKEN)
		return rb_first(&session->stat_root);

	return rb_next(node);
}

static void stat_seq_stop(struct seq_file *s, void *p)
{
	struct stat_session *session = s->private;
	mutex_unlock(&session->stat_mutex);
}

static int stat_seq_show(struct seq_file *s, void *v)
{
	struct stat_session *session = s->private;
	struct stat_node *l = container_of(v, struct stat_node, node);

	if (v == SEQ_START_TOKEN)
		return session->ts->stat_headers(s);

	return session->ts->stat_show(s, l->stat);
}

static const struct seq_operations trace_stat_seq_ops = {
	.start		= stat_seq_start,
	.next		= stat_seq_next,
	.stop		= stat_seq_stop,
	.show		= stat_seq_show
};

/* The session stat is refilled and resorted at each stat file opening */
static int tracing_stat_open(struct inode *inode, struct file *file)
{
	int ret;
	struct seq_file *m;
	struct stat_session *session = inode->i_private;

	ret = stat_seq_init(session);
	if (ret)
		return ret;

	ret = seq_open(file, &trace_stat_seq_ops);
	if (ret) {
		reset_stat_session(session);
		return ret;
	}

	m = file->private_data;
	m->private = session;
	return ret;
}

/*
 * Avoid consuming memory with our now useless rbtree.
 */
static int tracing_stat_release(struct inode *i, struct file *f)
{
	struct stat_session *session = i->i_private;

	reset_stat_session(session);

	return seq_release(i, f);
}

static const struct file_operations tracing_stat_fops = {
	.open		= tracing_stat_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= tracing_stat_release
};

static int tracing_stat_init(void)
{
	struct dentry *d_tracing;

	d_tracing = tracing_init_dentry();

	stat_dir = debugfs_create_dir("trace_stat", d_tracing);
	if (!stat_dir)
		pr_warning("Could not create debugfs 'trace_stat' entry\n");
	return 0;
}

static int init_stat_file(struct stat_session *session)
{
	if (!stat_dir && tracing_stat_init())
		return -ENODEV;

	session->file = debugfs_create_file(session->ts->name, 0644,
					    stat_dir,
					    session, &tracing_stat_fops);
	if (!session->file)
		return -ENOMEM;
	return 0;
}

int register_stat_tracer(struct tracer_stat *trace)
{
	struct stat_session *session, *node;
	int ret;

	if (!trace)
		return -EINVAL;

	if (!trace->stat_start || !trace->stat_next || !trace->stat_show)
		return -EINVAL;

	/* Already registered? */
	mutex_lock(&all_stat_sessions_mutex);
	list_for_each_entry(node, &all_stat_sessions, session_list) {
		if (node->ts == trace) {
			mutex_unlock(&all_stat_sessions_mutex);
			return -EINVAL;
		}
	}
	mutex_unlock(&all_stat_sessions_mutex);

	/* Init the session */
	session = kzalloc(sizeof(*session), GFP_KERNEL);
	if (!session)
		return -ENOMEM;

	session->ts = trace;
	INIT_LIST_HEAD(&session->session_list);
	mutex_init(&session->stat_mutex);

	ret = init_stat_file(session);
	if (ret) {
		destroy_session(session);
		return ret;
	}

	/* Register */
	mutex_lock(&all_stat_sessions_mutex);
	list_add_tail(&session->session_list, &all_stat_sessions);
	mutex_unlock(&all_stat_sessions_mutex);

	return 0;
}

void unregister_stat_tracer(struct tracer_stat *trace)
{
	struct stat_session *node, *tmp;

	mutex_lock(&all_stat_sessions_mutex);
	list_for_each_entry_safe(node, tmp, &all_stat_sessions, session_list) {
		if (node->ts == trace) {
			list_del(&node->session_list);
			destroy_session(node);
			break;
		}
	}
	mutex_unlock(&all_stat_sessions_mutex);
}
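
/*
 * Example usage (an illustrative sketch, not part of this infrastructure):
 * a tracer exposes its statistics by filling a struct tracer_stat with
 * iterator callbacks and registering it. Everything named "example_*"
 * below, including EXAMPLE_NR_ENTRIES, is hypothetical and made up for
 * the sketch; only the tracer_stat callbacks and register_stat_tracer()
 * come from this file.
 *
 *	struct example_entry {
 *		const char	*name;
 *		unsigned long	hits;
 *	};
 *
 *	static struct example_entry example_entries[EXAMPLE_NR_ENTRIES];
 *
 *	static void *example_stat_start(struct tracer_stat *trace)
 *	{
 *		return &example_entries[0];
 *	}
 *
 *	static void *example_stat_next(void *prev, int idx)
 *	{
 *		if (idx >= EXAMPLE_NR_ENTRIES)
 *			return NULL;
 *		return &example_entries[idx];
 *	}
 *
 * Since insert_stat() sends entries that compare >= 0 to the left of the
 * rbtree, returning 1 when p1 is larger yields a descending sort:
 *
 *	static int example_stat_cmp(void *p1, void *p2)
 *	{
 *		struct example_entry *a = p1, *b = p2;
 *
 *		if (a->hits == b->hits)
 *			return 0;
 *		return a->hits > b->hits ? 1 : -1;
 *	}
 *
 *	static int example_stat_show(struct seq_file *s, void *p)
 *	{
 *		struct example_entry *entry = p;
 *
 *		seq_printf(s, "%-32s %12lu\n", entry->name, entry->hits);
 *		return 0;
 *	}
 *
 *	static int example_stat_headers(struct seq_file *s)
 *	{
 *		seq_printf(s, "%-32s %12s\n", "NAME", "HITS");
 *		return 0;
 *	}
 *
 *	static struct tracer_stat example_stats = {
 *		.name		= "example",
 *		.stat_start	= example_stat_start,
 *		.stat_next	= example_stat_next,
 *		.stat_cmp	= example_stat_cmp,
 *		.stat_show	= example_stat_show,
 *		.stat_headers	= example_stat_headers,
 *	};
 *
 *	register_stat_tracer(&example_stats);
 *
 * The stats then appear in <debugfs>/tracing/trace_stat/example,
 * re-sorted through example_stat_cmp() on every open of the file.
 */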