/*
 * trace_output.c
 *
 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
 *
 */

#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/ftrace.h>

#include "trace_output.h"

/* must be a power of 2 */
#define EVENT_HASHSIZE	128

DECLARE_RWSEM(trace_event_sem);

static struct hlist_head event_hash[EVENT_HASHSIZE] __read_mostly;

static int next_event_type = __TRACE_LAST_TYPE + 1;

enum print_line_t trace_print_bputs_msg_only(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry = iter->ent;
	struct bputs_entry *field;

	trace_assign_type(field, entry);

	trace_seq_puts(s, field->str);

	return trace_handle_return(s);
}

enum print_line_t trace_print_bprintk_msg_only(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry = iter->ent;
	struct bprint_entry *field;

	trace_assign_type(field, entry);

	trace_seq_bprintf(s, field->fmt, field->buf);

	return trace_handle_return(s);
}

enum print_line_t trace_print_printk_msg_only(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry = iter->ent;
	struct print_entry *field;

	trace_assign_type(field, iter->ent);

	trace_seq_puts(s, field->buf);

	return trace_handle_return(s);
}

const char *
ftrace_print_flags_seq(struct trace_seq *p, const char *delim,
		       unsigned long flags,
		       const struct trace_print_flags *flag_array)
{
	unsigned long mask;
	const char *str;
	const char *ret = trace_seq_buffer_ptr(p);
	int i, first = 1;

	for (i = 0; flag_array[i].name && flags; i++) {

		mask = flag_array[i].mask;
		if ((flags & mask) != mask)
			continue;

		str = flag_array[i].name;
		flags &= ~mask;
		if (!first && delim)
			trace_seq_puts(p, delim);
		else
			first = 0;
		trace_seq_puts(p, str);
	}

	/* check for left over flags */
	if (flags) {
		if (!first && delim)
			trace_seq_puts(p, delim);
		trace_seq_printf(p, "0x%lx", flags);
	}

	trace_seq_putc(p, 0);

	return ret;
}
EXPORT_SYMBOL(ftrace_print_flags_seq);

const char *
ftrace_print_symbols_seq(struct trace_seq *p, unsigned long val,
			 const struct trace_print_flags *symbol_array)
{
	int i;
	const char *ret = trace_seq_buffer_ptr(p);

	for (i = 0; symbol_array[i].name; i++) {

		if (val != symbol_array[i].mask)
			continue;

		trace_seq_puts(p, symbol_array[i].name);
		break;
	}

	if (ret == (const char *)(trace_seq_buffer_ptr(p)))
		trace_seq_printf(p, "0x%lx", val);

	trace_seq_putc(p, 0);

	return ret;
}
EXPORT_SYMBOL(ftrace_print_symbols_seq);

#if BITS_PER_LONG == 32
const char *
ftrace_print_symbols_seq_u64(struct trace_seq *p, unsigned long long val,
			     const struct trace_print_flags_u64 *symbol_array)
{
	int i;
	const char *ret = trace_seq_buffer_ptr(p);

	for (i = 0; symbol_array[i].name; i++) {

		if (val != symbol_array[i].mask)
			continue;

		trace_seq_puts(p, symbol_array[i].name);
		break;
	}

	if (ret == (const char *)(trace_seq_buffer_ptr(p)))
		trace_seq_printf(p, "0x%llx", val);

	trace_seq_putc(p, 0);

	return ret;
}
EXPORT_SYMBOL(ftrace_print_symbols_seq_u64);
#endif
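/*
 * Usage sketch (illustrative, not part of the original file): the two
 * helpers above back the __print_flags()/__print_symbolic() macros used
 * in TP_printk(). The "irq_flags" table below is hypothetical; an entry
 * with a NULL name terminates the array.
 *
 *	static const struct trace_print_flags irq_flags[] = {
 *		{ 0x01,	"LEVEL"  },
 *		{ 0x02,	"EDGE"   },
 *		{ 0x04,	"SHARED" },
 *		{ -1,	NULL     }
 *	};
 *
 *	ftrace_print_flags_seq(p, "|", 0x07, irq_flags);
 *		produces "LEVEL|EDGE|SHARED"
 *	ftrace_print_flags_seq(p, "|", 0x09, irq_flags);
 *		produces "LEVEL|0x8"  (leftover bits are printed in hex)
 */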
const char *
ftrace_print_bitmask_seq(struct trace_seq *p, void *bitmask_ptr,
			 unsigned int bitmask_size)
{
	const char *ret = trace_seq_buffer_ptr(p);

	trace_seq_bitmask(p, bitmask_ptr, bitmask_size * 8);
	trace_seq_putc(p, 0);

	return ret;
}
EXPORT_SYMBOL_GPL(ftrace_print_bitmask_seq);

const char *
ftrace_print_hex_seq(struct trace_seq *p, const unsigned char *buf, int buf_len)
{
	int i;
	const char *ret = trace_seq_buffer_ptr(p);

	for (i = 0; i < buf_len; i++)
		trace_seq_printf(p, "%s%2.2x", i == 0 ? "" : " ", buf[i]);

	trace_seq_putc(p, 0);

	return ret;
}
EXPORT_SYMBOL(ftrace_print_hex_seq);

const char *
ftrace_print_array_seq(struct trace_seq *p, const void *buf, int buf_len,
		       size_t el_size)
{
	const char *ret = trace_seq_buffer_ptr(p);
	const char *prefix = "";
	void *ptr = (void *)buf;

	trace_seq_putc(p, '{');

	while (ptr < buf + buf_len) {
		switch (el_size) {
		case 1:
			trace_seq_printf(p, "%s0x%x", prefix,
					 *(u8 *)ptr);
			break;
		case 2:
			trace_seq_printf(p, "%s0x%x", prefix,
					 *(u16 *)ptr);
			break;
		case 4:
			trace_seq_printf(p, "%s0x%x", prefix,
					 *(u32 *)ptr);
			break;
		case 8:
			trace_seq_printf(p, "%s0x%llx", prefix,
					 *(u64 *)ptr);
			break;
		default:
			trace_seq_printf(p, "BAD SIZE:%zu 0x%x", el_size,
					 *(u8 *)ptr);
			el_size = 1;
		}
		prefix = ",";
		ptr += el_size;
	}

	trace_seq_putc(p, '}');
	trace_seq_putc(p, 0);

	return ret;
}
EXPORT_SYMBOL(ftrace_print_array_seq);

int ftrace_raw_output_prep(struct trace_iterator *iter,
			   struct trace_event *trace_event)
{
	struct ftrace_event_call *event;
	struct trace_seq *s = &iter->seq;
	struct trace_seq *p = &iter->tmp_seq;
	struct trace_entry *entry;

	event = container_of(trace_event, struct ftrace_event_call, event);
	entry = iter->ent;

	if (entry->type != event->event.type) {
		WARN_ON_ONCE(1);
		return TRACE_TYPE_UNHANDLED;
	}

	trace_seq_init(p);
	trace_seq_printf(s, "%s: ", ftrace_event_name(event));

	return trace_handle_return(s);
}
EXPORT_SYMBOL(ftrace_raw_output_prep);
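/*
 * Usage sketch (illustrative, not part of the original file): a typical
 * event ->trace() callback first calls ftrace_raw_output_prep() to check
 * the entry type and emit the "name: " prefix, then appends its own
 * fields and finishes via trace_handle_return(). The "my_entry" layout
 * and "value" field are hypothetical.
 *
 *	static enum print_line_t
 *	my_event_trace(struct trace_iterator *iter, int flags,
 *		       struct trace_event *event)
 *	{
 *		struct my_entry *field;
 *		int ret = ftrace_raw_output_prep(iter, event);
 *
 *		if (ret != TRACE_TYPE_HANDLED)
 *			return ret;
 *
 *		trace_assign_type(field, iter->ent);
 *		trace_seq_printf(&iter->seq, "value=%d\n", field->value);
 *
 *		return trace_handle_return(&iter->seq);
 *	}
 */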
static int ftrace_output_raw(struct trace_iterator *iter, char *name,
			     char *fmt, va_list ap)
{
	struct trace_seq *s = &iter->seq;

	trace_seq_printf(s, "%s: ", name);
	trace_seq_vprintf(s, fmt, ap);

	return trace_handle_return(s);
}

int ftrace_output_call(struct trace_iterator *iter, char *name, char *fmt, ...)
{
	va_list ap;
	int ret;

	va_start(ap, fmt);
	ret = ftrace_output_raw(iter, name, fmt, ap);
	va_end(ap);

	return ret;
}
EXPORT_SYMBOL_GPL(ftrace_output_call);

#ifdef CONFIG_KRETPROBES
static inline const char *kretprobed(const char *name)
{
	static const char tramp_name[] = "kretprobe_trampoline";
	int size = sizeof(tramp_name);

	if (strncmp(tramp_name, name, size) == 0)
		return "[unknown/kretprobe'd]";
	return name;
}
#else
static inline const char *kretprobed(const char *name)
{
	return name;
}
#endif /* CONFIG_KRETPROBES */

static void
seq_print_sym_short(struct trace_seq *s, const char *fmt, unsigned long address)
{
#ifdef CONFIG_KALLSYMS
	char str[KSYM_SYMBOL_LEN];
	const char *name;

	kallsyms_lookup(address, NULL, NULL, NULL, str);

	name = kretprobed(str);

	trace_seq_printf(s, fmt, name);
#endif
}

static void
seq_print_sym_offset(struct trace_seq *s, const char *fmt,
		     unsigned long address)
{
#ifdef CONFIG_KALLSYMS
	char str[KSYM_SYMBOL_LEN];
	const char *name;

	sprint_symbol(str, address);
	name = kretprobed(str);

	trace_seq_printf(s, fmt, name);
#endif
}

#ifndef CONFIG_64BIT
# define IP_FMT "%08lx"
#else
# define IP_FMT "%016lx"
#endif

int seq_print_user_ip(struct trace_seq *s, struct mm_struct *mm,
		      unsigned long ip, unsigned long sym_flags)
{
	struct file *file = NULL;
	unsigned long vmstart = 0;
	int ret = 1;

	if (s->full)
		return 0;

	if (mm) {
		const struct vm_area_struct *vma;

		down_read(&mm->mmap_sem);
		vma = find_vma(mm, ip);
		if (vma) {
			file = vma->vm_file;
			vmstart = vma->vm_start;
		}
		if (file) {
			ret = trace_seq_path(s, &file->f_path);
			if (ret)
				trace_seq_printf(s, "[+0x%lx]",
						 ip - vmstart);
		}
		up_read(&mm->mmap_sem);
	}
	if (ret && ((sym_flags & TRACE_ITER_SYM_ADDR) || !file))
		trace_seq_printf(s, " <" IP_FMT ">", ip);
	return !trace_seq_has_overflowed(s);
}
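/*
 * Output sketch (illustrative): when the instruction pointer falls in a
 * resolvable mapping, the line is the file path plus the offset into the
 * mapping; the raw address is appended when TRACE_ITER_SYM_ADDR is set
 * or when no backing file was found, e.g. (path and addresses made up):
 *
 *	/lib/x86_64-linux-gnu/libc.so.6[+0x3421a] <00007f9c4a2b421a>
 */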
int
seq_print_userip_objs(const struct userstack_entry *entry, struct trace_seq *s,
		      unsigned long sym_flags)
{
	struct mm_struct *mm = NULL;
	unsigned int i;

	if (trace_flags & TRACE_ITER_SYM_USEROBJ) {
		struct task_struct *task;
		/*
		 * we do the lookup on the thread group leader,
		 * since individual threads might have already quit!
		 */
		rcu_read_lock();
		task = find_task_by_vpid(entry->tgid);
		if (task)
			mm = get_task_mm(task);
		rcu_read_unlock();
	}

	for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
		unsigned long ip = entry->caller[i];

		if (ip == ULONG_MAX || trace_seq_has_overflowed(s))
			break;

		trace_seq_puts(s, " => ");

		if (!ip) {
			trace_seq_puts(s, "??");
			trace_seq_putc(s, '\n');
			continue;
		}

		seq_print_user_ip(s, mm, ip, sym_flags);
		trace_seq_putc(s, '\n');
	}

	if (mm)
		mmput(mm);

	return !trace_seq_has_overflowed(s);
}

int
seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags)
{
	if (!ip) {
		trace_seq_putc(s, '0');
		goto out;
	}

	if (sym_flags & TRACE_ITER_SYM_OFFSET)
		seq_print_sym_offset(s, "%s", ip);
	else
		seq_print_sym_short(s, "%s", ip);

	if (sym_flags & TRACE_ITER_SYM_ADDR)
		trace_seq_printf(s, " <" IP_FMT ">", ip);

out:
	return !trace_seq_has_overflowed(s);
}

/**
 * trace_print_lat_fmt - print the irq, preempt and lockdep fields
 * @s: trace seq struct to write to
 * @entry: The trace entry field from the ring buffer
 *
 * Prints the generic fields: irqs off, hard/soft interrupt context,
 * need-resched state, and preempt count.
 */
int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
{
	char hardsoft_irq;
	char need_resched;
	char irqs_off;
	int hardirq;
	int softirq;

	hardirq = entry->flags & TRACE_FLAG_HARDIRQ;
	softirq = entry->flags & TRACE_FLAG_SOFTIRQ;

	irqs_off =
		(entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' :
		(entry->flags & TRACE_FLAG_IRQS_NOSUPPORT) ? 'X' :
		'.';

	switch (entry->flags & (TRACE_FLAG_NEED_RESCHED |
				TRACE_FLAG_PREEMPT_RESCHED)) {
	case TRACE_FLAG_NEED_RESCHED | TRACE_FLAG_PREEMPT_RESCHED:
		need_resched = 'N';
		break;
	case TRACE_FLAG_NEED_RESCHED:
		need_resched = 'n';
		break;
	case TRACE_FLAG_PREEMPT_RESCHED:
		need_resched = 'p';
		break;
	default:
		need_resched = '.';
		break;
	}

	hardsoft_irq =
		(hardirq && softirq) ? 'H' :
		hardirq ? 'h' :
		softirq ? 's' :
		'.';

	trace_seq_printf(s, "%c%c%c",
			 irqs_off, need_resched, hardsoft_irq);

	if (entry->preempt_count)
		trace_seq_printf(s, "%x", entry->preempt_count);
	else
		trace_seq_putc(s, '.');

	return !trace_seq_has_overflowed(s);
}

static int
lat_print_generic(struct trace_seq *s, struct trace_entry *entry, int cpu)
{
	char comm[TASK_COMM_LEN];

	trace_find_cmdline(entry->pid, comm);

	trace_seq_printf(s, "%8.8s-%-5d %3d",
			 comm, entry->pid, cpu);

	return trace_print_lat_fmt(s, entry);
}

#undef MARK
#define MARK(v, s) {.val = v, .sym = s}
/* trace overhead mark */
static const struct trace_mark {
	unsigned long long	val; /* unit: nsec */
	char			sym;
} mark[] = {
	MARK(1000000000ULL	, '$'), /* 1 sec */
	MARK(1000000ULL		, '#'), /* 1000 usecs */
	MARK(100000ULL		, '!'), /* 100 usecs */
	MARK(10000ULL		, '+'), /* 10 usecs */
};
#undef MARK

char trace_find_mark(unsigned long long d)
{
	int i;
	int size = ARRAY_SIZE(mark);

	for (i = 0; i < size; i++) {
		if (d >= mark[i].val)
			break;
	}

	return (i == size) ? ' ' : mark[i].sym;
}
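/*
 * Worked example (illustrative): the mark[] table is ordered from the
 * largest threshold down, so trace_find_mark() returns the first mark
 * whose threshold the delta reaches:
 *
 *	trace_find_mark(2 * NSEC_PER_SEC)	returns '$' (>= 1 sec)
 *	trace_find_mark(150 * NSEC_PER_USEC)	returns '!' (>= 100 usecs)
 *	trace_find_mark(5 * NSEC_PER_USEC)	returns ' ' (below all marks)
 */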
static int
lat_print_timestamp(struct trace_iterator *iter, u64 next_ts)
{
	unsigned long verbose = trace_flags & TRACE_ITER_VERBOSE;
	unsigned long in_ns = iter->iter_flags & TRACE_FILE_TIME_IN_NS;
	unsigned long long abs_ts = iter->ts - iter->trace_buffer->time_start;
	unsigned long long rel_ts = next_ts - iter->ts;
	struct trace_seq *s = &iter->seq;

	if (in_ns) {
		abs_ts = ns2usecs(abs_ts);
		rel_ts = ns2usecs(rel_ts);
	}

	if (verbose && in_ns) {
		unsigned long abs_usec = do_div(abs_ts, USEC_PER_MSEC);
		unsigned long abs_msec = (unsigned long)abs_ts;
		unsigned long rel_usec = do_div(rel_ts, USEC_PER_MSEC);
		unsigned long rel_msec = (unsigned long)rel_ts;

		trace_seq_printf(
			s, "[%08llx] %ld.%03ldms (+%ld.%03ldms): ",
			ns2usecs(iter->ts),
			abs_msec, abs_usec,
			rel_msec, rel_usec);

	} else if (verbose && !in_ns) {
		trace_seq_printf(
			s, "[%016llx] %lld (+%lld): ",
			iter->ts, abs_ts, rel_ts);

	} else if (!verbose && in_ns) {
		trace_seq_printf(
			s, " %4lldus%c: ",
			abs_ts,
			trace_find_mark(rel_ts * NSEC_PER_USEC));

	} else { /* !verbose && !in_ns */
		trace_seq_printf(s, " %4lld: ", abs_ts);
	}

	return !trace_seq_has_overflowed(s);
}

int trace_print_context(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry = iter->ent;
	unsigned long long t;
	unsigned long secs, usec_rem;
	char comm[TASK_COMM_LEN];

	trace_find_cmdline(entry->pid, comm);

	trace_seq_printf(s, "%16s-%-5d [%03d] ",
			 comm, entry->pid, iter->cpu);

	if (trace_flags & TRACE_ITER_IRQ_INFO)
		trace_print_lat_fmt(s, entry);

	if (iter->iter_flags & TRACE_FILE_TIME_IN_NS) {
		t = ns2usecs(iter->ts);
		usec_rem = do_div(t, USEC_PER_SEC);
		secs = (unsigned long)t;
		trace_seq_printf(s, " %5lu.%06lu: ", secs, usec_rem);
	} else
		trace_seq_printf(s, " %12llu: ", iter->ts);

	return !trace_seq_has_overflowed(s);
}

int trace_print_lat_context(struct trace_iterator *iter)
{
	u64 next_ts;
	/* trace_find_next_entry will reset ent_size */
	int ent_size = iter->ent_size;
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry = iter->ent,
			   *next_entry = trace_find_next_entry(iter, NULL,
							       &next_ts);
	unsigned long verbose = (trace_flags & TRACE_ITER_VERBOSE);

	/* Restore the original ent_size */
	iter->ent_size = ent_size;

	if (!next_entry)
		next_ts = iter->ts;

	if (verbose) {
		char comm[TASK_COMM_LEN];

		trace_find_cmdline(entry->pid, comm);

		trace_seq_printf(
			s, "%16s %5d %3d %d %08x %08lx ",
			comm, entry->pid, iter->cpu, entry->flags,
			entry->preempt_count, iter->idx);
	} else {
		lat_print_generic(s, entry, iter->cpu);
	}

	lat_print_timestamp(iter, next_ts);

	return !trace_seq_has_overflowed(s);
}

static const char state_to_char[] = TASK_STATE_TO_CHAR_STR;

static int task_state_char(unsigned long state)
{
	int bit = state ? __ffs(state) + 1 : 0;

	return bit < sizeof(state_to_char) - 1 ? state_to_char[bit] : '?';
}
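/*
 * Worked example (illustrative): TASK_STATE_TO_CHAR_STR is
 * "RSDTtXZxKWP" in kernels of this vintage, and the lowest set state
 * bit selects the character:
 *
 *	task_state_char(0)			returns 'R' (running)
 *	task_state_char(TASK_INTERRUPTIBLE)	returns 'S' (bit 0 set)
 *	task_state_char(TASK_UNINTERRUPTIBLE)	returns 'D' (bit 1 set)
 *
 * Anything past the known states falls back to '?'.
 */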
/**
 * ftrace_find_event - find a registered event
 * @type: the type of event to look for
 *
 * Returns the event of type @type, otherwise NULL.
 * Called with trace_event_read_lock() held.
 */
struct trace_event *ftrace_find_event(int type)
{
	struct trace_event *event;
	unsigned key;

	key = type & (EVENT_HASHSIZE - 1);

	hlist_for_each_entry(event, &event_hash[key], node) {
		if (event->type == type)
			return event;
	}

	return NULL;
}

static LIST_HEAD(ftrace_event_list);

static int trace_search_list(struct list_head **list)
{
	struct trace_event *e;
	int last = __TRACE_LAST_TYPE;

	if (list_empty(&ftrace_event_list)) {
		*list = &ftrace_event_list;
		return last + 1;
	}

	/*
	 * We used up all possible max events,
	 * let's see if somebody freed one.
	 */
	list_for_each_entry(e, &ftrace_event_list, list) {
		if (e->type != last + 1)
			break;
		last++;
	}

	/* Did we use up all 65 thousand events??? */
	if ((last + 1) > FTRACE_MAX_EVENT)
		return 0;

	*list = &e->list;
	return last + 1;
}

void trace_event_read_lock(void)
{
	down_read(&trace_event_sem);
}

void trace_event_read_unlock(void)
{
	up_read(&trace_event_sem);
}

/**
 * register_ftrace_event - register output for an event type
 * @event: the event type to register
 *
 * Event types are stored in a hash and this hash is used to
 * find a way to print an event. If the @event->type is set
 * then it will use that type, otherwise it will assign a
 * type to use.
 *
 * If you assign your own type, please make sure it is added
 * to the trace_type enum in trace.h, to avoid collisions
 * with the dynamic types.
 *
 * Returns the event type number or zero on error.
 */
int register_ftrace_event(struct trace_event *event)
{
	unsigned key;
	int ret = 0;

	down_write(&trace_event_sem);

	if (WARN_ON(!event))
		goto out;

	if (WARN_ON(!event->funcs))
		goto out;

	INIT_LIST_HEAD(&event->list);

	if (!event->type) {
		struct list_head *list = NULL;

		if (next_event_type > FTRACE_MAX_EVENT) {

			event->type = trace_search_list(&list);
			if (!event->type)
				goto out;

		} else {

			event->type = next_event_type++;
			list = &ftrace_event_list;
		}

		if (WARN_ON(ftrace_find_event(event->type)))
			goto out;

		list_add_tail(&event->list, list);

	} else if (event->type > __TRACE_LAST_TYPE) {
		printk(KERN_WARNING "Need to add type to trace.h\n");
		WARN_ON(1);
		goto out;
	} else {
		/* Is this event already used */
		if (ftrace_find_event(event->type))
			goto out;
	}

	if (event->funcs->trace == NULL)
		event->funcs->trace = trace_nop_print;
	if (event->funcs->raw == NULL)
		event->funcs->raw = trace_nop_print;
	if (event->funcs->hex == NULL)
		event->funcs->hex = trace_nop_print;
	if (event->funcs->binary == NULL)
		event->funcs->binary = trace_nop_print;

	key = event->type & (EVENT_HASHSIZE - 1);

	hlist_add_head(&event->node, &event_hash[key]);

	ret = event->type;
out:
	up_write(&trace_event_sem);

	return ret;
}
EXPORT_SYMBOL_GPL(register_ftrace_event);
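/*
 * Usage sketch (illustrative, not part of the original file): a caller
 * that wants a dynamically assigned type leaves ->type at zero and
 * supplies a funcs table; callbacks left NULL default to
 * trace_nop_print(). "my_event_trace" is the hypothetical callback
 * sketched earlier in this file.
 *
 *	static struct trace_event_functions my_funcs = {
 *		.trace	= my_event_trace,
 *	};
 *
 *	static struct trace_event my_event = {
 *		.funcs	= &my_funcs,
 *	};
 *
 *	int type = register_ftrace_event(&my_event);
 *	if (!type)
 *		pr_warn("failed to register my_event\n");
 */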
/*
 * Used by module code with the trace_event_sem held for write.
 */
int __unregister_ftrace_event(struct trace_event *event)
{
	hlist_del(&event->node);
	list_del(&event->list);
	return 0;
}

/**
 * unregister_ftrace_event - remove a no longer used event
 * @event: the event to remove
 */
int unregister_ftrace_event(struct trace_event *event)
{
	down_write(&trace_event_sem);
	__unregister_ftrace_event(event);
	up_write(&trace_event_sem);

	return 0;
}
EXPORT_SYMBOL_GPL(unregister_ftrace_event);

/*
 * Standard events
 */

enum print_line_t trace_nop_print(struct trace_iterator *iter, int flags,
				  struct trace_event *event)
{
	trace_seq_printf(&iter->seq, "type: %d\n", iter->ent->type);

	return trace_handle_return(&iter->seq);
}

/* TRACE_FN */
static enum print_line_t trace_fn_trace(struct trace_iterator *iter, int flags,
					struct trace_event *event)
{
	struct ftrace_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	seq_print_ip_sym(s, field->ip, flags);

	if ((flags & TRACE_ITER_PRINT_PARENT) && field->parent_ip) {
		trace_seq_puts(s, " <-");
		seq_print_ip_sym(s, field->parent_ip, flags);
	}

	trace_seq_putc(s, '\n');

	return trace_handle_return(s);
}

static enum print_line_t trace_fn_raw(struct trace_iterator *iter, int flags,
				      struct trace_event *event)
{
	struct ftrace_entry *field;

	trace_assign_type(field, iter->ent);

	trace_seq_printf(&iter->seq, "%lx %lx\n",
			 field->ip,
			 field->parent_ip);

	return trace_handle_return(&iter->seq);
}

static enum print_line_t trace_fn_hex(struct trace_iterator *iter, int flags,
				      struct trace_event *event)
{
	struct ftrace_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	SEQ_PUT_HEX_FIELD(s, field->ip);
	SEQ_PUT_HEX_FIELD(s, field->parent_ip);

	return trace_handle_return(s);
}

static enum print_line_t trace_fn_bin(struct trace_iterator *iter, int flags,
				      struct trace_event *event)
{
	struct ftrace_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	SEQ_PUT_FIELD(s, field->ip);
	SEQ_PUT_FIELD(s, field->parent_ip);

	return trace_handle_return(s);
}

static struct trace_event_functions trace_fn_funcs = {
	.trace		= trace_fn_trace,
	.raw		= trace_fn_raw,
	.hex		= trace_fn_hex,
	.binary		= trace_fn_bin,
};

static struct trace_event trace_fn_event = {
	.type		= TRACE_FN,
	.funcs		= &trace_fn_funcs,
};

/* TRACE_CTX and TRACE_WAKE */
static enum print_line_t trace_ctxwake_print(struct trace_iterator *iter,
					     char *delim)
{
	struct ctx_switch_entry *field;
	char comm[TASK_COMM_LEN];
	int S, T;

	trace_assign_type(field, iter->ent);

	T = task_state_char(field->next_state);
	S = task_state_char(field->prev_state);
	trace_find_cmdline(field->next_pid, comm);
	trace_seq_printf(&iter->seq,
			 " %5d:%3d:%c %s [%03d] %5d:%3d:%c %s\n",
			 field->prev_pid,
			 field->prev_prio,
			 S, delim,
			 field->next_cpu,
			 field->next_pid,
			 field->next_prio,
			 T, comm);

	return trace_handle_return(&iter->seq);
}
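/*
 * Output sketch (illustrative): trace_ctxwake_print() renders both the
 * context-switch ("==>") and wakeup ("  +") variants as
 *
 *	prev_pid:prev_prio:prev_state <delim> [next_cpu]
 *		next_pid:next_prio:next_state next_comm
 *
 * e.g. (values made up):
 *
 *	  1234:120:S ==> [003]  5678:120:R bash
 */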
static enum print_line_t trace_ctx_print(struct trace_iterator *iter, int flags,
					 struct trace_event *event)
{
	return trace_ctxwake_print(iter, "==>");
}

static enum print_line_t trace_wake_print(struct trace_iterator *iter,
					  int flags, struct trace_event *event)
{
	return trace_ctxwake_print(iter, "  +");
}

static int trace_ctxwake_raw(struct trace_iterator *iter, char S)
{
	struct ctx_switch_entry *field;
	int T;

	trace_assign_type(field, iter->ent);

	if (!S)
		S = task_state_char(field->prev_state);
	T = task_state_char(field->next_state);
	trace_seq_printf(&iter->seq, "%d %d %c %d %d %d %c\n",
			 field->prev_pid,
			 field->prev_prio,
			 S,
			 field->next_cpu,
			 field->next_pid,
			 field->next_prio,
			 T);

	return trace_handle_return(&iter->seq);
}

static enum print_line_t trace_ctx_raw(struct trace_iterator *iter, int flags,
				       struct trace_event *event)
{
	return trace_ctxwake_raw(iter, 0);
}

static enum print_line_t trace_wake_raw(struct trace_iterator *iter, int flags,
					struct trace_event *event)
{
	return trace_ctxwake_raw(iter, '+');
}

static int trace_ctxwake_hex(struct trace_iterator *iter, char S)
{
	struct ctx_switch_entry *field;
	struct trace_seq *s = &iter->seq;
	int T;

	trace_assign_type(field, iter->ent);

	if (!S)
		S = task_state_char(field->prev_state);
	T = task_state_char(field->next_state);

	SEQ_PUT_HEX_FIELD(s, field->prev_pid);
	SEQ_PUT_HEX_FIELD(s, field->prev_prio);
	SEQ_PUT_HEX_FIELD(s, S);
	SEQ_PUT_HEX_FIELD(s, field->next_cpu);
	SEQ_PUT_HEX_FIELD(s, field->next_pid);
	SEQ_PUT_HEX_FIELD(s, field->next_prio);
	SEQ_PUT_HEX_FIELD(s, T);

	return trace_handle_return(s);
}

static enum print_line_t trace_ctx_hex(struct trace_iterator *iter, int flags,
				       struct trace_event *event)
{
	return trace_ctxwake_hex(iter, 0);
}

static enum print_line_t trace_wake_hex(struct trace_iterator *iter, int flags,
					struct trace_event *event)
{
	return trace_ctxwake_hex(iter, '+');
}

static enum print_line_t trace_ctxwake_bin(struct trace_iterator *iter,
					   int flags, struct trace_event *event)
{
	struct ctx_switch_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	SEQ_PUT_FIELD(s, field->prev_pid);
	SEQ_PUT_FIELD(s, field->prev_prio);
	SEQ_PUT_FIELD(s, field->prev_state);
	SEQ_PUT_FIELD(s, field->next_cpu);
	SEQ_PUT_FIELD(s, field->next_pid);
	SEQ_PUT_FIELD(s, field->next_prio);
	SEQ_PUT_FIELD(s, field->next_state);

	return trace_handle_return(s);
}

static struct trace_event_functions trace_ctx_funcs = {
	.trace		= trace_ctx_print,
	.raw		= trace_ctx_raw,
	.hex		= trace_ctx_hex,
	.binary		= trace_ctxwake_bin,
};

static struct trace_event trace_ctx_event = {
	.type		= TRACE_CTX,
	.funcs		= &trace_ctx_funcs,
};

static struct trace_event_functions trace_wake_funcs = {
	.trace		= trace_wake_print,
	.raw		= trace_wake_raw,
	.hex		= trace_wake_hex,
	.binary		= trace_ctxwake_bin,
};

static struct trace_event trace_wake_event = {
	.type		= TRACE_WAKE,
	.funcs		= &trace_wake_funcs,
};
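/*
 * Layout note (illustrative): the shared ->binary() callback emits each
 * member with SEQ_PUT_FIELD(), i.e. sizeof(field) raw bytes per call,
 * so the binary record is the seven fields concatenated in call order:
 *
 *	prev_pid, prev_prio, prev_state,
 *	next_cpu, next_pid, next_prio, next_state
 *
 * Consumers must use the same field widths as struct ctx_switch_entry.
 */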
/* TRACE_STACK */

static enum print_line_t trace_stack_print(struct trace_iterator *iter,
					   int flags, struct trace_event *event)
{
	struct stack_entry *field;
	struct trace_seq *s = &iter->seq;
	unsigned long *p;
	unsigned long *end;

	trace_assign_type(field, iter->ent);
	end = (unsigned long *)((long)iter->ent + iter->ent_size);

	trace_seq_puts(s, "<stack trace>\n");

	for (p = field->caller; p && *p != ULONG_MAX && p < end; p++) {

		if (trace_seq_has_overflowed(s))
			break;

		trace_seq_puts(s, " => ");
		seq_print_ip_sym(s, *p, flags);
		trace_seq_putc(s, '\n');
	}

	return trace_handle_return(s);
}

static struct trace_event_functions trace_stack_funcs = {
	.trace		= trace_stack_print,
};

static struct trace_event trace_stack_event = {
	.type		= TRACE_STACK,
	.funcs		= &trace_stack_funcs,
};

/* TRACE_USER_STACK */
static enum print_line_t trace_user_stack_print(struct trace_iterator *iter,
						int flags, struct trace_event *event)
{
	struct userstack_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	trace_seq_puts(s, "<user stack trace>\n");
	seq_print_userip_objs(field, s, flags);

	return trace_handle_return(s);
}

static struct trace_event_functions trace_user_stack_funcs = {
	.trace		= trace_user_stack_print,
};

static struct trace_event trace_user_stack_event = {
	.type		= TRACE_USER_STACK,
	.funcs		= &trace_user_stack_funcs,
};

/* TRACE_BPUTS */
static enum print_line_t
trace_bputs_print(struct trace_iterator *iter, int flags,
		  struct trace_event *event)
{
	struct trace_entry *entry = iter->ent;
	struct trace_seq *s = &iter->seq;
	struct bputs_entry *field;

	trace_assign_type(field, entry);

	seq_print_ip_sym(s, field->ip, flags);
	trace_seq_puts(s, ": ");
	trace_seq_puts(s, field->str);

	return trace_handle_return(s);
}

static enum print_line_t
trace_bputs_raw(struct trace_iterator *iter, int flags,
		struct trace_event *event)
{
	struct bputs_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	trace_seq_printf(s, ": %lx : ", field->ip);
	trace_seq_puts(s, field->str);

	return trace_handle_return(s);
}

static struct trace_event_functions trace_bputs_funcs = {
	.trace		= trace_bputs_print,
	.raw		= trace_bputs_raw,
};

static struct trace_event trace_bputs_event = {
	.type		= TRACE_BPUTS,
	.funcs		= &trace_bputs_funcs,
};
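/*
 * Background note (illustrative): the print-style events (TRACE_BPUTS
 * above, TRACE_BPRINT and TRACE_PRINT below) map to the trace_printk()
 * fast paths. TRACE_BPUTS stores only a pointer to a constant string;
 * TRACE_BPRINT stores the format pointer plus the binary-packed
 * arguments, decoded later by trace_seq_bprintf():
 *
 *	trace_printk("hello\n");	emits a TRACE_BPUTS entry
 *	trace_printk("x=%d\n", x);	emits a TRACE_BPRINT entry
 *
 * TRACE_PRINT carries an already formatted string and is the fallback
 * when the format string cannot simply be referenced at read time.
 */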
/* TRACE_BPRINT */
static enum print_line_t
trace_bprint_print(struct trace_iterator *iter, int flags,
		   struct trace_event *event)
{
	struct trace_entry *entry = iter->ent;
	struct trace_seq *s = &iter->seq;
	struct bprint_entry *field;

	trace_assign_type(field, entry);

	seq_print_ip_sym(s, field->ip, flags);
	trace_seq_puts(s, ": ");
	trace_seq_bprintf(s, field->fmt, field->buf);

	return trace_handle_return(s);
}

static enum print_line_t
trace_bprint_raw(struct trace_iterator *iter, int flags,
		 struct trace_event *event)
{
	struct bprint_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	trace_seq_printf(s, ": %lx : ", field->ip);
	trace_seq_bprintf(s, field->fmt, field->buf);

	return trace_handle_return(s);
}

static struct trace_event_functions trace_bprint_funcs = {
	.trace		= trace_bprint_print,
	.raw		= trace_bprint_raw,
};

static struct trace_event trace_bprint_event = {
	.type		= TRACE_BPRINT,
	.funcs		= &trace_bprint_funcs,
};

/* TRACE_PRINT */
static enum print_line_t trace_print_print(struct trace_iterator *iter,
					   int flags, struct trace_event *event)
{
	struct print_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	seq_print_ip_sym(s, field->ip, flags);
	trace_seq_printf(s, ": %s", field->buf);

	return trace_handle_return(s);
}

static enum print_line_t trace_print_raw(struct trace_iterator *iter, int flags,
					 struct trace_event *event)
{
	struct print_entry *field;

	trace_assign_type(field, iter->ent);

	trace_seq_printf(&iter->seq, "# %lx %s", field->ip, field->buf);

	return trace_handle_return(&iter->seq);
}

static struct trace_event_functions trace_print_funcs = {
	.trace		= trace_print_print,
	.raw		= trace_print_raw,
};

static struct trace_event trace_print_event = {
	.type		= TRACE_PRINT,
	.funcs		= &trace_print_funcs,
};

static struct trace_event *events[] __initdata = {
	&trace_fn_event,
	&trace_ctx_event,
	&trace_wake_event,
	&trace_stack_event,
	&trace_user_stack_event,
	&trace_bputs_event,
	&trace_bprint_event,
	&trace_print_event,
	NULL
};

__init static int init_events(void)
{
	struct trace_event *event;
	int i, ret;

	for (i = 0; events[i]; i++) {
		event = events[i];

		ret = register_ftrace_event(event);
		if (!ret) {
			printk(KERN_WARNING "event %d failed to register\n",
			       event->type);
			WARN_ON_ONCE(1);
		}
	}

	return 0;
}
early_initcall(init_events);