/*
 * trace_output.c
 *
 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
 *
 */

#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/ftrace.h>

#include "trace_output.h"

/* must be a power of 2 */
#define EVENT_HASHSIZE	128

DECLARE_RWSEM(trace_event_mutex);

static struct hlist_head event_hash[EVENT_HASHSIZE] __read_mostly;

static int next_event_type = __TRACE_LAST_TYPE + 1;

int trace_print_seq(struct seq_file *m, struct trace_seq *s)
{
	int len = s->len >= PAGE_SIZE ? PAGE_SIZE - 1 : s->len;
	int ret;

	ret = seq_write(m, s->buffer, len);

	/*
	 * Only reset this buffer if we successfully wrote to the
	 * seq_file buffer.
	 */
	if (!ret)
		trace_seq_init(s);

	return ret;
}

enum print_line_t trace_print_bprintk_msg_only(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry = iter->ent;
	struct bprint_entry *field;
	int ret;

	trace_assign_type(field, entry);

	ret = trace_seq_bprintf(s, field->fmt, field->buf);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

enum print_line_t trace_print_printk_msg_only(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry = iter->ent;
	struct print_entry *field;
	int ret;

	trace_assign_type(field, entry);

	ret = trace_seq_printf(s, "%s", field->buf);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

/**
 * trace_seq_printf - sequence printing of trace information
 * @s: trace sequence descriptor
 * @fmt: printf format string
 *
 * Returns 0 if the trace exceeds the buffer's free space,
 * 1 otherwise.
 *
 * The tracer may use either sequence operations or its own
 * copy to user routines. To simplify formatting of a trace
 * trace_seq_printf() is used to store strings into a special
 * buffer (@s). Then the output may be either used by
 * the sequencer or pulled into another buffer.
 */
int
trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
{
	int len = (PAGE_SIZE - 1) - s->len;
	va_list ap;
	int ret;

	if (s->full || !len)
		return 0;

	va_start(ap, fmt);
	ret = vsnprintf(s->buffer + s->len, len, fmt, ap);
	va_end(ap);

	/* If we can't write it all, don't bother writing anything */
	if (ret >= len) {
		s->full = 1;
		return 0;
	}

	s->len += ret;

	return 1;
}
EXPORT_SYMBOL_GPL(trace_seq_printf);
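/*
 * Example (illustrative only): callers treat a zero return as a
 * truncated line. A print handler might do:
 *
 *	if (!trace_seq_printf(s, "pid=%d\n", pid))
 *		return TRACE_TYPE_PARTIAL_LINE;
 *
 * "pid" here is just a stand-in value; the pattern matches the
 * event handlers later in this file.
 */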
/**
 * trace_seq_vprintf - sequence printing of trace information
 * @s: trace sequence descriptor
 * @fmt: printf format string
 *
 * The tracer may use either sequence operations or its own
 * copy to user routines. To simplify formatting of a trace
 * trace_seq_printf() is used to store strings into a special
 * buffer (@s). Then the output may be either used by
 * the sequencer or pulled into another buffer.
 */
int
trace_seq_vprintf(struct trace_seq *s, const char *fmt, va_list args)
{
	int len = (PAGE_SIZE - 1) - s->len;
	int ret;

	if (s->full || !len)
		return 0;

	ret = vsnprintf(s->buffer + s->len, len, fmt, args);

	/* If we can't write it all, don't bother writing anything */
	if (ret >= len) {
		s->full = 1;
		return 0;
	}

	s->len += ret;

	return len;
}
EXPORT_SYMBOL_GPL(trace_seq_vprintf);

int trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary)
{
	int len = (PAGE_SIZE - 1) - s->len;
	int ret;

	if (s->full || !len)
		return 0;

	ret = bstr_printf(s->buffer + s->len, len, fmt, binary);

	/* If we can't write it all, don't bother writing anything */
	if (ret >= len) {
		s->full = 1;
		return 0;
	}

	s->len += ret;

	return len;
}

/**
 * trace_seq_puts - trace sequence printing of simple string
 * @s: trace sequence descriptor
 * @str: simple string to record
 *
 * The tracer may use either the sequence operations or its own
 * copy to user routines. This function records a simple string
 * into a special buffer (@s) for later retrieval by a sequencer
 * or other mechanism.
 */
int trace_seq_puts(struct trace_seq *s, const char *str)
{
	int len = strlen(str);

	if (s->full)
		return 0;

	if (len > ((PAGE_SIZE - 1) - s->len)) {
		s->full = 1;
		return 0;
	}

	memcpy(s->buffer + s->len, str, len);
	s->len += len;

	return len;
}

int trace_seq_putc(struct trace_seq *s, unsigned char c)
{
	if (s->full)
		return 0;

	if (s->len >= (PAGE_SIZE - 1)) {
		s->full = 1;
		return 0;
	}

	s->buffer[s->len++] = c;

	return 1;
}
EXPORT_SYMBOL(trace_seq_putc);

int trace_seq_putmem(struct trace_seq *s, const void *mem, size_t len)
{
	if (s->full)
		return 0;

	if (len > ((PAGE_SIZE - 1) - s->len)) {
		s->full = 1;
		return 0;
	}

	memcpy(s->buffer + s->len, mem, len);
	s->len += len;

	return len;
}

int trace_seq_putmem_hex(struct trace_seq *s, const void *mem, size_t len)
{
	unsigned char hex[HEX_CHARS];
	const unsigned char *data = mem;
	int i, j;

	if (s->full)
		return 0;

#ifdef __BIG_ENDIAN
	for (i = 0, j = 0; i < len; i++) {
#else
	for (i = len-1, j = 0; i >= 0; i--) {
#endif
		hex[j++] = hex_asc_hi(data[i]);
		hex[j++] = hex_asc_lo(data[i]);
	}
	hex[j++] = ' ';

	return trace_seq_putmem(s, hex, j);
}

void *trace_seq_reserve(struct trace_seq *s, size_t len)
{
	void *ret;

	if (s->full)
		return NULL;

	if (len > ((PAGE_SIZE - 1) - s->len)) {
		s->full = 1;
		return NULL;
	}

	ret = s->buffer + s->len;
	s->len += len;

	return ret;
}

int trace_seq_path(struct trace_seq *s, struct path *path)
{
	unsigned char *p;

	if (s->full)
		return 0;

	if (s->len >= (PAGE_SIZE - 1)) {
		s->full = 1;
		return 0;
	}

	p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
	if (!IS_ERR(p)) {
		p = mangle_path(s->buffer + s->len, p, "\n");
		if (p) {
			s->len = p - s->buffer;
			return 1;
		}
	} else {
		s->buffer[s->len++] = '?';
		return 1;
	}

	s->full = 1;
	return 0;
}
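/*
 * Note on the "full" flag (illustrative example): once any of the
 * trace_seq_*() writers sets s->full, every later write returns 0
 * until the buffer is reset with trace_seq_init(). So a sequence
 * such as:
 *
 *	trace_seq_puts(s, huge_string);	(overflows: sets s->full)
 *	trace_seq_putc(s, 'x');		(returns 0, buffer untouched)
 *
 * never emits a partially written record. "huge_string" stands for
 * any string larger than the remaining space.
 */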
const char *
ftrace_print_flags_seq(struct trace_seq *p, const char *delim,
		       unsigned long flags,
		       const struct trace_print_flags *flag_array)
{
	unsigned long mask;
	const char *str;
	const char *ret = p->buffer + p->len;
	int i;

	for (i = 0; flag_array[i].name && flags; i++) {

		mask = flag_array[i].mask;
		if ((flags & mask) != mask)
			continue;

		str = flag_array[i].name;
		flags &= ~mask;
		if (p->len && delim)
			trace_seq_puts(p, delim);
		trace_seq_puts(p, str);
	}

	/* check for leftover flags */
	if (flags) {
		if (p->len && delim)
			trace_seq_puts(p, delim);
		trace_seq_printf(p, "0x%lx", flags);
	}

	trace_seq_putc(p, 0);

	return ret;
}
EXPORT_SYMBOL(ftrace_print_flags_seq);

const char *
ftrace_print_symbols_seq(struct trace_seq *p, unsigned long val,
			 const struct trace_print_flags *symbol_array)
{
	int i;
	const char *ret = p->buffer + p->len;

	for (i = 0; symbol_array[i].name; i++) {

		if (val != symbol_array[i].mask)
			continue;

		trace_seq_puts(p, symbol_array[i].name);
		break;
	}

	if (!p->len)
		trace_seq_printf(p, "0x%lx", val);

	trace_seq_putc(p, 0);

	return ret;
}
EXPORT_SYMBOL(ftrace_print_symbols_seq);

#if BITS_PER_LONG == 32
const char *
ftrace_print_symbols_seq_u64(struct trace_seq *p, unsigned long long val,
			     const struct trace_print_flags_u64 *symbol_array)
{
	int i;
	const char *ret = p->buffer + p->len;

	for (i = 0; symbol_array[i].name; i++) {

		if (val != symbol_array[i].mask)
			continue;

		trace_seq_puts(p, symbol_array[i].name);
		break;
	}

	if (!p->len)
		trace_seq_printf(p, "0x%llx", val);

	trace_seq_putc(p, 0);

	return ret;
}
EXPORT_SYMBOL(ftrace_print_symbols_seq_u64);
#endif
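/*
 * Example (illustrative sketch; the table below is hypothetical).
 * Assuming @p is empty:
 *
 *	static const struct trace_print_flags req_flags[] = {
 *		{ 0x1, "READ" },
 *		{ 0x2, "SYNC" },
 *		{ -1UL, NULL }
 *	};
 *
 *	ftrace_print_flags_seq(p, "|", 0x7, req_flags);
 *
 * appends "READ|SYNC|0x4" to @p: named bits are decoded in table
 * order and any leftover bits are printed in hex.
 */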
"" : " ", buf[i]); 391 392 trace_seq_putc(p, 0); 393 394 return ret; 395 } 396 EXPORT_SYMBOL(ftrace_print_hex_seq); 397 398 #ifdef CONFIG_KRETPROBES 399 static inline const char *kretprobed(const char *name) 400 { 401 static const char tramp_name[] = "kretprobe_trampoline"; 402 int size = sizeof(tramp_name); 403 404 if (strncmp(tramp_name, name, size) == 0) 405 return "[unknown/kretprobe'd]"; 406 return name; 407 } 408 #else 409 static inline const char *kretprobed(const char *name) 410 { 411 return name; 412 } 413 #endif /* CONFIG_KRETPROBES */ 414 415 static int 416 seq_print_sym_short(struct trace_seq *s, const char *fmt, unsigned long address) 417 { 418 #ifdef CONFIG_KALLSYMS 419 char str[KSYM_SYMBOL_LEN]; 420 const char *name; 421 422 kallsyms_lookup(address, NULL, NULL, NULL, str); 423 424 name = kretprobed(str); 425 426 return trace_seq_printf(s, fmt, name); 427 #endif 428 return 1; 429 } 430 431 static int 432 seq_print_sym_offset(struct trace_seq *s, const char *fmt, 433 unsigned long address) 434 { 435 #ifdef CONFIG_KALLSYMS 436 char str[KSYM_SYMBOL_LEN]; 437 const char *name; 438 439 sprint_symbol(str, address); 440 name = kretprobed(str); 441 442 return trace_seq_printf(s, fmt, name); 443 #endif 444 return 1; 445 } 446 447 #ifndef CONFIG_64BIT 448 # define IP_FMT "%08lx" 449 #else 450 # define IP_FMT "%016lx" 451 #endif 452 453 int seq_print_user_ip(struct trace_seq *s, struct mm_struct *mm, 454 unsigned long ip, unsigned long sym_flags) 455 { 456 struct file *file = NULL; 457 unsigned long vmstart = 0; 458 int ret = 1; 459 460 if (s->full) 461 return 0; 462 463 if (mm) { 464 const struct vm_area_struct *vma; 465 466 down_read(&mm->mmap_sem); 467 vma = find_vma(mm, ip); 468 if (vma) { 469 file = vma->vm_file; 470 vmstart = vma->vm_start; 471 } 472 if (file) { 473 ret = trace_seq_path(s, &file->f_path); 474 if (ret) 475 ret = trace_seq_printf(s, "[+0x%lx]", 476 ip - vmstart); 477 } 478 up_read(&mm->mmap_sem); 479 } 480 if (ret && ((sym_flags & TRACE_ITER_SYM_ADDR) || !file)) 481 ret = trace_seq_printf(s, " <" IP_FMT ">", ip); 482 return ret; 483 } 484 485 int 486 seq_print_userip_objs(const struct userstack_entry *entry, struct trace_seq *s, 487 unsigned long sym_flags) 488 { 489 struct mm_struct *mm = NULL; 490 int ret = 1; 491 unsigned int i; 492 493 if (trace_flags & TRACE_ITER_SYM_USEROBJ) { 494 struct task_struct *task; 495 /* 496 * we do the lookup on the thread group leader, 497 * since individual threads might have already quit! 
int
seq_print_userip_objs(const struct userstack_entry *entry, struct trace_seq *s,
		      unsigned long sym_flags)
{
	struct mm_struct *mm = NULL;
	int ret = 1;
	unsigned int i;

	if (trace_flags & TRACE_ITER_SYM_USEROBJ) {
		struct task_struct *task;
		/*
		 * we do the lookup on the thread group leader,
		 * since individual threads might have already quit!
		 */
		rcu_read_lock();
		task = find_task_by_vpid(entry->tgid);
		if (task)
			mm = get_task_mm(task);
		rcu_read_unlock();
	}

	for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
		unsigned long ip = entry->caller[i];

		if (ip == ULONG_MAX || !ret)
			break;
		if (ret)
			ret = trace_seq_puts(s, " => ");
		if (!ip) {
			if (ret)
				ret = trace_seq_puts(s, "??");
			if (ret)
				ret = trace_seq_puts(s, "\n");
			continue;
		}
		if (!ret)
			break;
		if (ret)
			ret = seq_print_user_ip(s, mm, ip, sym_flags);
		ret = trace_seq_puts(s, "\n");
	}

	if (mm)
		mmput(mm);
	return ret;
}

int
seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags)
{
	int ret;

	if (!ip)
		return trace_seq_printf(s, "0");

	if (sym_flags & TRACE_ITER_SYM_OFFSET)
		ret = seq_print_sym_offset(s, "%s", ip);
	else
		ret = seq_print_sym_short(s, "%s", ip);

	if (!ret)
		return 0;

	if (sym_flags & TRACE_ITER_SYM_ADDR)
		ret = trace_seq_printf(s, " <" IP_FMT ">", ip);
	return ret;
}

/**
 * trace_print_lat_fmt - print the irq, preempt and lockdep fields
 * @s: trace seq struct to write to
 * @entry: The trace entry field from the ring buffer
 *
 * Prints the generic fields: irqs off, hard or soft irq context,
 * need-resched, and the preempt count.
 */
int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
{
	char hardsoft_irq;
	char need_resched;
	char irqs_off;
	int hardirq;
	int softirq;
	int ret;

	hardirq = entry->flags & TRACE_FLAG_HARDIRQ;
	softirq = entry->flags & TRACE_FLAG_SOFTIRQ;

	irqs_off =
		(entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' :
		(entry->flags & TRACE_FLAG_IRQS_NOSUPPORT) ? 'X' :
		'.';
	need_resched =
		(entry->flags & TRACE_FLAG_NEED_RESCHED) ? 'N' : '.';
	hardsoft_irq =
		(hardirq && softirq) ? 'H' :
		hardirq ? 'h' :
		softirq ? 's' :
		'.';

	if (!trace_seq_printf(s, "%c%c%c",
			      irqs_off, need_resched, hardsoft_irq))
		return 0;

	if (entry->preempt_count)
		ret = trace_seq_printf(s, "%x", entry->preempt_count);
	else
		ret = trace_seq_putc(s, '.');

	return ret;
}

static int
lat_print_generic(struct trace_seq *s, struct trace_entry *entry, int cpu)
{
	char comm[TASK_COMM_LEN];

	trace_find_cmdline(entry->pid, comm);

	if (!trace_seq_printf(s, "%8.8s-%-5d %3d",
			      comm, entry->pid, cpu))
		return 0;

	return trace_print_lat_fmt(s, entry);
}
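/*
 * Illustrative reading of the four latency characters printed by
 * trace_print_lat_fmt() (values made up):
 *
 *	dNs2  =>  irqs off ('d'), need-resched set ('N'),
 *		  in softirq ('s'), preempt_count == 2
 *	...1  =>  irqs on, no resched pending, no irq context,
 *		  preempt_count == 1
 */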
static unsigned long preempt_mark_thresh = 100;

static int
lat_print_timestamp(struct trace_seq *s, u64 abs_usecs,
		    unsigned long rel_usecs)
{
	return trace_seq_printf(s, " %4lldus%c: ", abs_usecs,
				rel_usecs > preempt_mark_thresh ? '!' :
				rel_usecs > 1 ? '+' : ' ');
}

int trace_print_context(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry = iter->ent;
	unsigned long long t = ns2usecs(iter->ts);
	unsigned long usec_rem = do_div(t, USEC_PER_SEC);
	unsigned long secs = (unsigned long)t;
	char comm[TASK_COMM_LEN];
	int ret;

	trace_find_cmdline(entry->pid, comm);

	ret = trace_seq_printf(s, "%16s-%-5d [%03d] ",
			       comm, entry->pid, iter->cpu);
	if (!ret)
		return 0;

	if (trace_flags & TRACE_ITER_IRQ_INFO) {
		ret = trace_print_lat_fmt(s, entry);
		if (!ret)
			return 0;
	}

	return trace_seq_printf(s, " %5lu.%06lu: ",
				secs, usec_rem);
}

int trace_print_lat_context(struct trace_iterator *iter)
{
	u64 next_ts;
	int ret;
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry = iter->ent,
			   *next_entry = trace_find_next_entry(iter, NULL,
							       &next_ts);
	unsigned long verbose = (trace_flags & TRACE_ITER_VERBOSE);
	unsigned long abs_usecs = ns2usecs(iter->ts - iter->tr->time_start);
	unsigned long rel_usecs;

	if (!next_entry)
		next_ts = iter->ts;
	rel_usecs = ns2usecs(next_ts - iter->ts);

	if (verbose) {
		char comm[TASK_COMM_LEN];

		trace_find_cmdline(entry->pid, comm);

		ret = trace_seq_printf(s, "%16s %5d %3d %d %08x %08lx [%08llx]"
				       " %ld.%03ldms (+%ld.%03ldms): ", comm,
				       entry->pid, iter->cpu, entry->flags,
				       entry->preempt_count, iter->idx,
				       ns2usecs(iter->ts),
				       abs_usecs / USEC_PER_MSEC,
				       abs_usecs % USEC_PER_MSEC,
				       rel_usecs / USEC_PER_MSEC,
				       rel_usecs % USEC_PER_MSEC);
	} else {
		ret = lat_print_generic(s, entry, iter->cpu);
		if (ret)
			ret = lat_print_timestamp(s, abs_usecs, rel_usecs);
	}

	return ret;
}

static const char state_to_char[] = TASK_STATE_TO_CHAR_STR;

static int task_state_char(unsigned long state)
{
	int bit = state ? __ffs(state) + 1 : 0;

	return bit < sizeof(state_to_char) - 1 ? state_to_char[bit] : '?';
}

/**
 * ftrace_find_event - find a registered event
 * @type: the type of event to look for
 *
 * Returns an event of type @type, otherwise NULL.
 * Called with trace_event_read_lock() held.
 */
struct trace_event *ftrace_find_event(int type)
{
	struct trace_event *event;
	struct hlist_node *n;
	unsigned key;

	key = type & (EVENT_HASHSIZE - 1);

	hlist_for_each_entry(event, n, &event_hash[key], node) {
		if (event->type == type)
			return event;
	}

	return NULL;
}
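/*
 * Because EVENT_HASHSIZE is a power of 2, the mask above is a cheap
 * modulo. Illustrative arithmetic:
 *
 *	type = 130 (binary 1000 0010)
 *	key  = 130 & (128 - 1) = 130 & 0x7f = 2
 *
 * so event type 130 lands in hash bucket 2.
 */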
static LIST_HEAD(ftrace_event_list);

static int trace_search_list(struct list_head **list)
{
	struct trace_event *e;
	int last = __TRACE_LAST_TYPE;

	if (list_empty(&ftrace_event_list)) {
		*list = &ftrace_event_list;
		return last + 1;
	}

	/*
	 * We used up all possible max events,
	 * let's see if somebody freed one.
	 */
	list_for_each_entry(e, &ftrace_event_list, list) {
		if (e->type != last + 1)
			break;
		last++;
	}

	/* Did we use up all 65 thousand events??? */
	if ((last + 1) > FTRACE_MAX_EVENT)
		return 0;

	*list = &e->list;
	return last + 1;
}

void trace_event_read_lock(void)
{
	down_read(&trace_event_mutex);
}

void trace_event_read_unlock(void)
{
	up_read(&trace_event_mutex);
}

/**
 * register_ftrace_event - register output for an event type
 * @event: the event type to register
 *
 * Event types are stored in a hash and this hash is used to
 * find a way to print an event. If the @event->type is set
 * then it will use that type, otherwise it will assign a
 * type to use.
 *
 * If you assign your own type, please make sure it is added
 * to the trace_type enum in trace.h, to avoid collisions
 * with the dynamic types.
 *
 * Returns the event type number or zero on error.
 */
int register_ftrace_event(struct trace_event *event)
{
	unsigned key;
	int ret = 0;

	down_write(&trace_event_mutex);

	if (WARN_ON(!event))
		goto out;

	if (WARN_ON(!event->funcs))
		goto out;

	INIT_LIST_HEAD(&event->list);

	if (!event->type) {
		struct list_head *list = NULL;

		if (next_event_type > FTRACE_MAX_EVENT) {

			event->type = trace_search_list(&list);
			if (!event->type)
				goto out;

		} else {

			event->type = next_event_type++;
			list = &ftrace_event_list;
		}

		if (WARN_ON(ftrace_find_event(event->type)))
			goto out;

		list_add_tail(&event->list, list);

	} else if (event->type > __TRACE_LAST_TYPE) {
		printk(KERN_WARNING "Need to add type to trace.h\n");
		WARN_ON(1);
		goto out;
	} else {
		/* Is this event already used? */
		if (ftrace_find_event(event->type))
			goto out;
	}

	if (event->funcs->trace == NULL)
		event->funcs->trace = trace_nop_print;
	if (event->funcs->raw == NULL)
		event->funcs->raw = trace_nop_print;
	if (event->funcs->hex == NULL)
		event->funcs->hex = trace_nop_print;
	if (event->funcs->binary == NULL)
		event->funcs->binary = trace_nop_print;

	key = event->type & (EVENT_HASHSIZE - 1);

	hlist_add_head(&event->node, &event_hash[key]);

	ret = event->type;
 out:
	up_write(&trace_event_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(register_ftrace_event);
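/*
 * Illustrative registration sketch (my_output(), my_funcs and
 * my_event are hypothetical names):
 *
 *	static struct trace_event_functions my_funcs = {
 *		.trace	= my_output,	(unset .raw/.hex/.binary
 *					 default to trace_nop_print)
 *	};
 *	static struct trace_event my_event = {
 *		.funcs	= &my_funcs,	(.type left 0: one is assigned)
 *	};
 *
 *	ret = register_ftrace_event(&my_event);
 *
 * A zero return means the registration failed. Pair with
 * unregister_ftrace_event(&my_event) on teardown.
 */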
/*
 * Used by module code with the trace_event_mutex held for write.
 */
int __unregister_ftrace_event(struct trace_event *event)
{
	hlist_del(&event->node);
	list_del(&event->list);
	return 0;
}

/**
 * unregister_ftrace_event - remove a no longer used event
 * @event: the event to remove
 */
int unregister_ftrace_event(struct trace_event *event)
{
	down_write(&trace_event_mutex);
	__unregister_ftrace_event(event);
	up_write(&trace_event_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(unregister_ftrace_event);

/*
 * Standard events
 */

enum print_line_t trace_nop_print(struct trace_iterator *iter, int flags,
				  struct trace_event *event)
{
	if (!trace_seq_printf(&iter->seq, "type: %d\n", iter->ent->type))
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

/* TRACE_FN */
static enum print_line_t trace_fn_trace(struct trace_iterator *iter, int flags,
					struct trace_event *event)
{
	struct ftrace_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	if (!seq_print_ip_sym(s, field->ip, flags))
		goto partial;

	if ((flags & TRACE_ITER_PRINT_PARENT) && field->parent_ip) {
		if (!trace_seq_printf(s, " <-"))
			goto partial;
		if (!seq_print_ip_sym(s,
				      field->parent_ip,
				      flags))
			goto partial;
	}
	if (!trace_seq_printf(s, "\n"))
		goto partial;

	return TRACE_TYPE_HANDLED;

 partial:
	return TRACE_TYPE_PARTIAL_LINE;
}

static enum print_line_t trace_fn_raw(struct trace_iterator *iter, int flags,
				      struct trace_event *event)
{
	struct ftrace_entry *field;

	trace_assign_type(field, iter->ent);

	if (!trace_seq_printf(&iter->seq, "%lx %lx\n",
			      field->ip,
			      field->parent_ip))
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t trace_fn_hex(struct trace_iterator *iter, int flags,
				      struct trace_event *event)
{
	struct ftrace_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	SEQ_PUT_HEX_FIELD_RET(s, field->ip);
	SEQ_PUT_HEX_FIELD_RET(s, field->parent_ip);

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t trace_fn_bin(struct trace_iterator *iter, int flags,
				      struct trace_event *event)
{
	struct ftrace_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	SEQ_PUT_FIELD_RET(s, field->ip);
	SEQ_PUT_FIELD_RET(s, field->parent_ip);

	return TRACE_TYPE_HANDLED;
}

static struct trace_event_functions trace_fn_funcs = {
	.trace		= trace_fn_trace,
	.raw		= trace_fn_raw,
	.hex		= trace_fn_hex,
	.binary		= trace_fn_bin,
};

static struct trace_event trace_fn_event = {
	.type		= TRACE_FN,
	.funcs		= &trace_fn_funcs,
};
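/*
 * Illustrative trace_fn_trace() output with TRACE_ITER_PRINT_PARENT
 * set (the symbol names are made up):
 *
 *	kfree <-skb_release_data
 *
 * Without the flag, or when parent_ip is 0, only "kfree" is printed.
 */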
/* TRACE_CTX and TRACE_WAKE */
static enum print_line_t trace_ctxwake_print(struct trace_iterator *iter,
					     char *delim)
{
	struct ctx_switch_entry *field;
	char comm[TASK_COMM_LEN];
	int S, T;

	trace_assign_type(field, iter->ent);

	T = task_state_char(field->next_state);
	S = task_state_char(field->prev_state);
	trace_find_cmdline(field->next_pid, comm);
	if (!trace_seq_printf(&iter->seq,
			      " %5d:%3d:%c %s [%03d] %5d:%3d:%c %s\n",
			      field->prev_pid,
			      field->prev_prio,
			      S, delim,
			      field->next_cpu,
			      field->next_pid,
			      field->next_prio,
			      T, comm))
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t trace_ctx_print(struct trace_iterator *iter, int flags,
					 struct trace_event *event)
{
	return trace_ctxwake_print(iter, "==>");
}

static enum print_line_t trace_wake_print(struct trace_iterator *iter,
					  int flags, struct trace_event *event)
{
	return trace_ctxwake_print(iter, " +");
}

static int trace_ctxwake_raw(struct trace_iterator *iter, char S)
{
	struct ctx_switch_entry *field;
	int T;

	trace_assign_type(field, iter->ent);

	if (!S)
		S = task_state_char(field->prev_state);
	T = task_state_char(field->next_state);
	if (!trace_seq_printf(&iter->seq, "%d %d %c %d %d %d %c\n",
			      field->prev_pid,
			      field->prev_prio,
			      S,
			      field->next_cpu,
			      field->next_pid,
			      field->next_prio,
			      T))
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t trace_ctx_raw(struct trace_iterator *iter, int flags,
				       struct trace_event *event)
{
	return trace_ctxwake_raw(iter, 0);
}

static enum print_line_t trace_wake_raw(struct trace_iterator *iter, int flags,
					struct trace_event *event)
{
	return trace_ctxwake_raw(iter, '+');
}


static int trace_ctxwake_hex(struct trace_iterator *iter, char S)
{
	struct ctx_switch_entry *field;
	struct trace_seq *s = &iter->seq;
	int T;

	trace_assign_type(field, iter->ent);

	if (!S)
		S = task_state_char(field->prev_state);
	T = task_state_char(field->next_state);

	SEQ_PUT_HEX_FIELD_RET(s, field->prev_pid);
	SEQ_PUT_HEX_FIELD_RET(s, field->prev_prio);
	SEQ_PUT_HEX_FIELD_RET(s, S);
	SEQ_PUT_HEX_FIELD_RET(s, field->next_cpu);
	SEQ_PUT_HEX_FIELD_RET(s, field->next_pid);
	SEQ_PUT_HEX_FIELD_RET(s, field->next_prio);
	SEQ_PUT_HEX_FIELD_RET(s, T);

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t trace_ctx_hex(struct trace_iterator *iter, int flags,
				       struct trace_event *event)
{
	return trace_ctxwake_hex(iter, 0);
}

static enum print_line_t trace_wake_hex(struct trace_iterator *iter, int flags,
					struct trace_event *event)
{
	return trace_ctxwake_hex(iter, '+');
}

static enum print_line_t trace_ctxwake_bin(struct trace_iterator *iter,
					   int flags, struct trace_event *event)
{
	struct ctx_switch_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	SEQ_PUT_FIELD_RET(s, field->prev_pid);
	SEQ_PUT_FIELD_RET(s, field->prev_prio);
	SEQ_PUT_FIELD_RET(s, field->prev_state);
	SEQ_PUT_FIELD_RET(s, field->next_pid);
	SEQ_PUT_FIELD_RET(s, field->next_prio);
	SEQ_PUT_FIELD_RET(s, field->next_state);

	return TRACE_TYPE_HANDLED;
}

static struct trace_event_functions trace_ctx_funcs = {
	.trace		= trace_ctx_print,
	.raw		= trace_ctx_raw,
	.hex		= trace_ctx_hex,
	.binary		= trace_ctxwake_bin,
};

static struct trace_event trace_ctx_event = {
	.type		= TRACE_CTX,
	.funcs		= &trace_ctx_funcs,
};
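/*
 * Illustrative trace_ctxwake_print() line for a context switch
 * (pids, prios and comm are made up):
 *
 *	  2614:120:S ==> [001]  2620:120:R bash
 *
 * i.e. prev_pid:prev_prio:prev_state, the delimiter ("==>" for
 * TRACE_CTX, " +" for TRACE_WAKE), then [next_cpu]
 * next_pid:next_prio:next_state and the next task's comm.
 */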
static struct trace_event_functions trace_wake_funcs = {
	.trace		= trace_wake_print,
	.raw		= trace_wake_raw,
	.hex		= trace_wake_hex,
	.binary		= trace_ctxwake_bin,
};

static struct trace_event trace_wake_event = {
	.type		= TRACE_WAKE,
	.funcs		= &trace_wake_funcs,
};

/* TRACE_STACK */

static enum print_line_t trace_stack_print(struct trace_iterator *iter,
					   int flags, struct trace_event *event)
{
	struct stack_entry *field;
	struct trace_seq *s = &iter->seq;
	unsigned long *p;
	unsigned long *end;

	trace_assign_type(field, iter->ent);
	end = (unsigned long *)((long)iter->ent + iter->ent_size);

	if (!trace_seq_puts(s, "<stack trace>\n"))
		goto partial;

	for (p = field->caller; p && *p != ULONG_MAX && p < end; p++) {
		if (!trace_seq_puts(s, " => "))
			goto partial;

		if (!seq_print_ip_sym(s, *p, flags))
			goto partial;
		if (!trace_seq_puts(s, "\n"))
			goto partial;
	}

	return TRACE_TYPE_HANDLED;

 partial:
	return TRACE_TYPE_PARTIAL_LINE;
}

static struct trace_event_functions trace_stack_funcs = {
	.trace		= trace_stack_print,
};

static struct trace_event trace_stack_event = {
	.type		= TRACE_STACK,
	.funcs		= &trace_stack_funcs,
};

/* TRACE_USER_STACK */
static enum print_line_t trace_user_stack_print(struct trace_iterator *iter,
						int flags, struct trace_event *event)
{
	struct userstack_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	if (!trace_seq_puts(s, "<user stack trace>\n"))
		goto partial;

	if (!seq_print_userip_objs(field, s, flags))
		goto partial;

	return TRACE_TYPE_HANDLED;

 partial:
	return TRACE_TYPE_PARTIAL_LINE;
}

static struct trace_event_functions trace_user_stack_funcs = {
	.trace		= trace_user_stack_print,
};

static struct trace_event trace_user_stack_event = {
	.type		= TRACE_USER_STACK,
	.funcs		= &trace_user_stack_funcs,
};

/* TRACE_BPRINT */
static enum print_line_t
trace_bprint_print(struct trace_iterator *iter, int flags,
		   struct trace_event *event)
{
	struct trace_entry *entry = iter->ent;
	struct trace_seq *s = &iter->seq;
	struct bprint_entry *field;

	trace_assign_type(field, entry);

	if (!seq_print_ip_sym(s, field->ip, flags))
		goto partial;

	if (!trace_seq_puts(s, ": "))
		goto partial;

	if (!trace_seq_bprintf(s, field->fmt, field->buf))
		goto partial;

	return TRACE_TYPE_HANDLED;

 partial:
	return TRACE_TYPE_PARTIAL_LINE;
}


static enum print_line_t
trace_bprint_raw(struct trace_iterator *iter, int flags,
		 struct trace_event *event)
{
	struct bprint_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	if (!trace_seq_printf(s, ": %lx : ", field->ip))
		goto partial;

	if (!trace_seq_bprintf(s, field->fmt, field->buf))
		goto partial;

	return TRACE_TYPE_HANDLED;

 partial:
	return TRACE_TYPE_PARTIAL_LINE;
}

static struct trace_event_functions trace_bprint_funcs = {
	.trace		= trace_bprint_print,
	.raw		= trace_bprint_raw,
};

static struct trace_event trace_bprint_event = {
	.type		= TRACE_BPRINT,
	.funcs		= &trace_bprint_funcs,
};
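/*
 * Note (informal): a bprint entry stores the format pointer plus the
 * binary argument words captured at trace time; trace_seq_bprintf()
 * (via bstr_printf()) only expands them against the format when the
 * trace is read, rather than formatting in the traced context.
 */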
/* TRACE_PRINT */
static enum print_line_t trace_print_print(struct trace_iterator *iter,
					   int flags, struct trace_event *event)
{
	struct print_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	if (!seq_print_ip_sym(s, field->ip, flags))
		goto partial;

	if (!trace_seq_printf(s, ": %s", field->buf))
		goto partial;

	return TRACE_TYPE_HANDLED;

 partial:
	return TRACE_TYPE_PARTIAL_LINE;
}

static enum print_line_t trace_print_raw(struct trace_iterator *iter, int flags,
					 struct trace_event *event)
{
	struct print_entry *field;

	trace_assign_type(field, iter->ent);

	if (!trace_seq_printf(&iter->seq, "# %lx %s", field->ip, field->buf))
		goto partial;

	return TRACE_TYPE_HANDLED;

 partial:
	return TRACE_TYPE_PARTIAL_LINE;
}

static struct trace_event_functions trace_print_funcs = {
	.trace		= trace_print_print,
	.raw		= trace_print_raw,
};

static struct trace_event trace_print_event = {
	.type		= TRACE_PRINT,
	.funcs		= &trace_print_funcs,
};


static struct trace_event *events[] __initdata = {
	&trace_fn_event,
	&trace_ctx_event,
	&trace_wake_event,
	&trace_stack_event,
	&trace_user_stack_event,
	&trace_bprint_event,
	&trace_print_event,
	NULL
};

__init static int init_events(void)
{
	struct trace_event *event;
	int i, ret;

	for (i = 0; events[i]; i++) {
		event = events[i];

		ret = register_ftrace_event(event);
		if (!ret) {
			printk(KERN_WARNING "event %d failed to register\n",
			       event->type);
			WARN_ON_ONCE(1);
		}
	}

	return 0;
}
device_initcall(init_events);