/*
 * trace_output.c
 *
 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
 *
 */

#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/ftrace.h>

#include "trace_output.h"

/* must be a power of 2 */
#define EVENT_HASHSIZE	128

DECLARE_RWSEM(trace_event_sem);

static struct hlist_head event_hash[EVENT_HASHSIZE] __read_mostly;

static int next_event_type = __TRACE_LAST_TYPE + 1;

enum print_line_t trace_print_bputs_msg_only(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry = iter->ent;
	struct bputs_entry *field;

	trace_assign_type(field, entry);

	trace_seq_puts(s, field->str);

	return trace_handle_return(s);
}

enum print_line_t trace_print_bprintk_msg_only(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry = iter->ent;
	struct bprint_entry *field;

	trace_assign_type(field, entry);

	trace_seq_bprintf(s, field->fmt, field->buf);

	return trace_handle_return(s);
}

enum print_line_t trace_print_printk_msg_only(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry = iter->ent;
	struct print_entry *field;

	trace_assign_type(field, entry);

	trace_seq_puts(s, field->buf);

	return trace_handle_return(s);
}

const char *
trace_print_flags_seq(struct trace_seq *p, const char *delim,
		      unsigned long flags,
		      const struct trace_print_flags *flag_array)
{
	unsigned long mask;
	const char *str;
	const char *ret = trace_seq_buffer_ptr(p);
	int i, first = 1;

	for (i = 0; flag_array[i].name && flags; i++) {

		mask = flag_array[i].mask;
		if ((flags & mask) != mask)
			continue;

		str = flag_array[i].name;
		flags &= ~mask;
		if (!first && delim)
			trace_seq_puts(p, delim);
		else
			first = 0;
		trace_seq_puts(p, str);
	}

	/* check for left over flags */
	if (flags) {
		if (!first && delim)
			trace_seq_puts(p, delim);
		trace_seq_printf(p, "0x%lx", flags);
	}

	trace_seq_putc(p, 0);

	return ret;
}
EXPORT_SYMBOL(trace_print_flags_seq);

const char *
trace_print_symbols_seq(struct trace_seq *p, unsigned long val,
			const struct trace_print_flags *symbol_array)
{
	int i;
	const char *ret = trace_seq_buffer_ptr(p);

	for (i = 0; symbol_array[i].name; i++) {

		if (val != symbol_array[i].mask)
			continue;

		trace_seq_puts(p, symbol_array[i].name);
		break;
	}

	if (ret == (const char *)(trace_seq_buffer_ptr(p)))
		trace_seq_printf(p, "0x%lx", val);

	trace_seq_putc(p, 0);

	return ret;
}
EXPORT_SYMBOL(trace_print_symbols_seq);

#if BITS_PER_LONG == 32
const char *
trace_print_symbols_seq_u64(struct trace_seq *p, unsigned long long val,
			    const struct trace_print_flags_u64 *symbol_array)
{
	int i;
	const char *ret = trace_seq_buffer_ptr(p);

	for (i = 0; symbol_array[i].name; i++) {

		if (val != symbol_array[i].mask)
			continue;

		trace_seq_puts(p, symbol_array[i].name);
		break;
	}

	if (ret == (const char *)(trace_seq_buffer_ptr(p)))
		trace_seq_printf(p, "0x%llx", val);

	trace_seq_putc(p, 0);

	return ret;
}
EXPORT_SYMBOL(trace_print_symbols_seq_u64);
#endif
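/*
 * Example (illustrative, not part of this file): the helpers above back
 * the __print_flags() and __print_symbolic() macros used in TRACE_EVENT()
 * print formats. Given a hypothetical table terminated by a NULL name:
 *
 *	static const struct trace_print_flags my_flags[] = {
 *		{ 0x1, "READ"  },
 *		{ 0x2, "WRITE" },
 *		{ -1,  NULL    }
 *	};
 *
 * trace_print_flags_seq(p, "|", 0xb, my_flags) renders "READ|WRITE|0x8":
 * known bits by name, leftover bits in hex.
 */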
const char *
trace_print_bitmask_seq(struct trace_seq *p, void *bitmask_ptr,
			unsigned int bitmask_size)
{
	const char *ret = trace_seq_buffer_ptr(p);

	trace_seq_bitmask(p, bitmask_ptr, bitmask_size * 8);
	trace_seq_putc(p, 0);

	return ret;
}
EXPORT_SYMBOL_GPL(trace_print_bitmask_seq);

const char *
trace_print_hex_seq(struct trace_seq *p, const unsigned char *buf, int buf_len)
{
	int i;
	const char *ret = trace_seq_buffer_ptr(p);

	for (i = 0; i < buf_len; i++)
		trace_seq_printf(p, "%s%2.2x", i == 0 ? "" : " ", buf[i]);

	trace_seq_putc(p, 0);

	return ret;
}
EXPORT_SYMBOL(trace_print_hex_seq);

const char *
trace_print_array_seq(struct trace_seq *p, const void *buf, int count,
		      size_t el_size)
{
	const char *ret = trace_seq_buffer_ptr(p);
	const char *prefix = "";
	void *ptr = (void *)buf;
	size_t buf_len = count * el_size;

	trace_seq_putc(p, '{');

	while (ptr < buf + buf_len) {
		switch (el_size) {
		case 1:
			trace_seq_printf(p, "%s0x%x", prefix,
					 *(u8 *)ptr);
			break;
		case 2:
			trace_seq_printf(p, "%s0x%x", prefix,
					 *(u16 *)ptr);
			break;
		case 4:
			trace_seq_printf(p, "%s0x%x", prefix,
					 *(u32 *)ptr);
			break;
		case 8:
			trace_seq_printf(p, "%s0x%llx", prefix,
					 *(u64 *)ptr);
			break;
		default:
			trace_seq_printf(p, "BAD SIZE:%zu 0x%x", el_size,
					 *(u8 *)ptr);
			el_size = 1;
		}
		prefix = ",";
		ptr += el_size;
	}

	trace_seq_putc(p, '}');
	trace_seq_putc(p, 0);

	return ret;
}
EXPORT_SYMBOL(trace_print_array_seq);
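/*
 * Example (illustrative): trace_print_array_seq() backs __print_array().
 * For a hypothetical u16 array {1, 2, 3}, trace_print_array_seq(p, arr,
 * 3, sizeof(u16)) renders "{0x1,0x2,0x3}", while trace_print_hex_seq()
 * on the bytes {0xde, 0xad, 0xbe} renders "de ad be".
 */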
int trace_raw_output_prep(struct trace_iterator *iter,
			  struct trace_event *trace_event)
{
	struct trace_event_call *event;
	struct trace_seq *s = &iter->seq;
	struct trace_seq *p = &iter->tmp_seq;
	struct trace_entry *entry;

	event = container_of(trace_event, struct trace_event_call, event);
	entry = iter->ent;

	if (entry->type != event->event.type) {
		WARN_ON_ONCE(1);
		return TRACE_TYPE_UNHANDLED;
	}

	trace_seq_init(p);
	trace_seq_printf(s, "%s: ", trace_event_name(event));

	return trace_handle_return(s);
}
EXPORT_SYMBOL(trace_raw_output_prep);

static int trace_output_raw(struct trace_iterator *iter, char *name,
			    char *fmt, va_list ap)
{
	struct trace_seq *s = &iter->seq;

	trace_seq_printf(s, "%s: ", name);
	trace_seq_vprintf(s, fmt, ap);

	return trace_handle_return(s);
}

int trace_output_call(struct trace_iterator *iter, char *name, char *fmt, ...)
{
	va_list ap;
	int ret;

	va_start(ap, fmt);
	ret = trace_output_raw(iter, name, fmt, ap);
	va_end(ap);

	return ret;
}
EXPORT_SYMBOL_GPL(trace_output_call);
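/*
 * Example (illustrative): trace_raw_output_prep() is the first step of
 * the per-event output callbacks generated by the TRACE_EVENT()
 * machinery. A hand-written callback for a hypothetical event "foo"
 * would follow the same shape:
 *
 *	static enum print_line_t
 *	trace_raw_output_foo(struct trace_iterator *iter, int flags,
 *			     struct trace_event *trace_event)
 *	{
 *		struct trace_seq *s = &iter->seq;
 *		int ret = trace_raw_output_prep(iter, trace_event);
 *
 *		if (ret != TRACE_TYPE_HANDLED)
 *			return ret;
 *
 *		trace_seq_printf(s, "bar=%d\n", 0);
 *		return trace_handle_return(s);
 *	}
 */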
#ifdef CONFIG_KRETPROBES
static inline const char *kretprobed(const char *name)
{
	static const char tramp_name[] = "kretprobe_trampoline";
	int size = sizeof(tramp_name);

	if (strncmp(tramp_name, name, size) == 0)
		return "[unknown/kretprobe'd]";
	return name;
}
#else
static inline const char *kretprobed(const char *name)
{
	return name;
}
#endif /* CONFIG_KRETPROBES */

static void
seq_print_sym_short(struct trace_seq *s, const char *fmt, unsigned long address)
{
#ifdef CONFIG_KALLSYMS
	char str[KSYM_SYMBOL_LEN];
	const char *name;

	kallsyms_lookup(address, NULL, NULL, NULL, str);

	name = kretprobed(str);

	trace_seq_printf(s, fmt, name);
#endif
}

static void
seq_print_sym_offset(struct trace_seq *s, const char *fmt,
		     unsigned long address)
{
#ifdef CONFIG_KALLSYMS
	char str[KSYM_SYMBOL_LEN];
	const char *name;

	sprint_symbol(str, address);
	name = kretprobed(str);

	trace_seq_printf(s, fmt, name);
#endif
}

#ifndef CONFIG_64BIT
# define IP_FMT "%08lx"
#else
# define IP_FMT "%016lx"
#endif

int seq_print_user_ip(struct trace_seq *s, struct mm_struct *mm,
		      unsigned long ip, unsigned long sym_flags)
{
	struct file *file = NULL;
	unsigned long vmstart = 0;
	int ret = 1;

	if (s->full)
		return 0;

	if (mm) {
		const struct vm_area_struct *vma;

		down_read(&mm->mmap_sem);
		vma = find_vma(mm, ip);
		if (vma) {
			file = vma->vm_file;
			vmstart = vma->vm_start;
		}
		if (file) {
			ret = trace_seq_path(s, &file->f_path);
			if (ret)
				trace_seq_printf(s, "[+0x%lx]",
						 ip - vmstart);
		}
		up_read(&mm->mmap_sem);
	}
	if (ret && ((sym_flags & TRACE_ITER_SYM_ADDR) || !file))
		trace_seq_printf(s, " <" IP_FMT ">", ip);
	return !trace_seq_has_overflowed(s);
}

int
seq_print_userip_objs(const struct userstack_entry *entry, struct trace_seq *s,
		      unsigned long sym_flags)
{
	struct mm_struct *mm = NULL;
	unsigned int i;

	if (trace_flags & TRACE_ITER_SYM_USEROBJ) {
		struct task_struct *task;
		/*
		 * we do the lookup on the thread group leader,
		 * since individual threads might have already quit!
		 */
		rcu_read_lock();
		task = find_task_by_vpid(entry->tgid);
		if (task)
			mm = get_task_mm(task);
		rcu_read_unlock();
	}

	for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
		unsigned long ip = entry->caller[i];

		if (ip == ULONG_MAX || trace_seq_has_overflowed(s))
			break;

		trace_seq_puts(s, " => ");

		if (!ip) {
			trace_seq_puts(s, "??");
			trace_seq_putc(s, '\n');
			continue;
		}

		seq_print_user_ip(s, mm, ip, sym_flags);
		trace_seq_putc(s, '\n');
	}

	if (mm)
		mmput(mm);

	return !trace_seq_has_overflowed(s);
}

int
seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags)
{
	if (!ip) {
		trace_seq_putc(s, '0');
		goto out;
	}

	if (sym_flags & TRACE_ITER_SYM_OFFSET)
		seq_print_sym_offset(s, "%s", ip);
	else
		seq_print_sym_short(s, "%s", ip);

	if (sym_flags & TRACE_ITER_SYM_ADDR)
		trace_seq_printf(s, " <" IP_FMT ">", ip);

 out:
	return !trace_seq_has_overflowed(s);
}

/**
 * trace_print_lat_fmt - print the irq, preempt and lockdep fields
 * @s: trace seq struct to write to
 * @entry: The trace entry field from the ring buffer
 *
 * Prints the generic fields of irqs off, in hard or softirq, preempt
 * count.
 */
int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
{
	char hardsoft_irq;
	char need_resched;
	char irqs_off;
	int hardirq;
	int softirq;

	hardirq = entry->flags & TRACE_FLAG_HARDIRQ;
	softirq = entry->flags & TRACE_FLAG_SOFTIRQ;

	irqs_off =
		(entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' :
		(entry->flags & TRACE_FLAG_IRQS_NOSUPPORT) ? 'X' :
		'.';

	switch (entry->flags & (TRACE_FLAG_NEED_RESCHED |
				TRACE_FLAG_PREEMPT_RESCHED)) {
	case TRACE_FLAG_NEED_RESCHED | TRACE_FLAG_PREEMPT_RESCHED:
		need_resched = 'N';
		break;
	case TRACE_FLAG_NEED_RESCHED:
		need_resched = 'n';
		break;
	case TRACE_FLAG_PREEMPT_RESCHED:
		need_resched = 'p';
		break;
	default:
		need_resched = '.';
		break;
	}

	hardsoft_irq =
		(hardirq && softirq) ? 'H' :
		hardirq ? 'h' :
		softirq ? 's' :
		'.';

	trace_seq_printf(s, "%c%c%c",
			 irqs_off, need_resched, hardsoft_irq);

	if (entry->preempt_count)
		trace_seq_printf(s, "%x", entry->preempt_count);
	else
		trace_seq_putc(s, '.');

	return !trace_seq_has_overflowed(s);
}
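/*
 * Example (illustrative): the four characters emitted above read as,
 * e.g., "dNh1": interrupts disabled ('d'), need-resched and
 * preempt-resched both set ('N'), in hardirq context ('h'), and a
 * preempt_count of 1. An entry from plain process context with nothing
 * set prints "....".
 */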
static int
lat_print_generic(struct trace_seq *s, struct trace_entry *entry, int cpu)
{
	char comm[TASK_COMM_LEN];

	trace_find_cmdline(entry->pid, comm);

	trace_seq_printf(s, "%8.8s-%-5d %3d",
			 comm, entry->pid, cpu);

	return trace_print_lat_fmt(s, entry);
}

#undef MARK
#define MARK(v, s) {.val = v, .sym = s}
/* trace overhead mark */
static const struct trace_mark {
	unsigned long long	val; /* unit: nsec */
	char			sym;
} mark[] = {
	MARK(1000000000ULL	, '$'), /* 1 sec */
	MARK(100000000ULL	, '@'), /* 100 msec */
	MARK(10000000ULL	, '*'), /* 10 msec */
	MARK(1000000ULL		, '#'), /* 1000 usecs */
	MARK(100000ULL		, '!'), /* 100 usecs */
	MARK(10000ULL		, '+'), /* 10 usecs */
};
#undef MARK

char trace_find_mark(unsigned long long d)
{
	int i;
	int size = ARRAY_SIZE(mark);

	for (i = 0; i < size; i++) {
		if (d > mark[i].val)
			break;
	}

	return (i == size) ? ' ' : mark[i].sym;
}
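/*
 * Example (illustrative): trace_find_mark() returns the symbol of the
 * first threshold the delay exceeds. A 2 ms delay (2000000 ns) is not
 * above 1 sec, 100 msec or 10 msec, but is above 1 msec, so it is
 * marked '#'; delays of 10 usecs or less get a plain space.
 */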
static int
lat_print_timestamp(struct trace_iterator *iter, u64 next_ts)
{
	unsigned long verbose = trace_flags & TRACE_ITER_VERBOSE;
	unsigned long in_ns = iter->iter_flags & TRACE_FILE_TIME_IN_NS;
	unsigned long long abs_ts = iter->ts - iter->trace_buffer->time_start;
	unsigned long long rel_ts = next_ts - iter->ts;
	struct trace_seq *s = &iter->seq;

	if (in_ns) {
		abs_ts = ns2usecs(abs_ts);
		rel_ts = ns2usecs(rel_ts);
	}

	if (verbose && in_ns) {
		unsigned long abs_usec = do_div(abs_ts, USEC_PER_MSEC);
		unsigned long abs_msec = (unsigned long)abs_ts;
		unsigned long rel_usec = do_div(rel_ts, USEC_PER_MSEC);
		unsigned long rel_msec = (unsigned long)rel_ts;

		trace_seq_printf(
			s, "[%08llx] %ld.%03ldms (+%ld.%03ldms): ",
			ns2usecs(iter->ts),
			abs_msec, abs_usec,
			rel_msec, rel_usec);

	} else if (verbose && !in_ns) {
		trace_seq_printf(
			s, "[%016llx] %lld (+%lld): ",
			iter->ts, abs_ts, rel_ts);

	} else if (!verbose && in_ns) {
		trace_seq_printf(
			s, " %4lldus%c: ",
			abs_ts,
			trace_find_mark(rel_ts * NSEC_PER_USEC));

	} else { /* !verbose && !in_ns */
		trace_seq_printf(s, " %4lld: ", abs_ts);
	}

	return !trace_seq_has_overflowed(s);
}

int trace_print_context(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry = iter->ent;
	unsigned long long t;
	unsigned long secs, usec_rem;
	char comm[TASK_COMM_LEN];

	trace_find_cmdline(entry->pid, comm);

	trace_seq_printf(s, "%16s-%-5d [%03d] ",
			 comm, entry->pid, iter->cpu);

	if (trace_flags & TRACE_ITER_IRQ_INFO)
		trace_print_lat_fmt(s, entry);

	if (iter->iter_flags & TRACE_FILE_TIME_IN_NS) {
		t = ns2usecs(iter->ts);
		usec_rem = do_div(t, USEC_PER_SEC);
		secs = (unsigned long)t;
		trace_seq_printf(s, " %5lu.%06lu: ", secs, usec_rem);
	} else
		trace_seq_printf(s, " %12llu: ", iter->ts);

	return !trace_seq_has_overflowed(s);
}

int trace_print_lat_context(struct trace_iterator *iter)
{
	u64 next_ts;
	/* trace_find_next_entry will reset ent_size */
	int ent_size = iter->ent_size;
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry = iter->ent,
			   *next_entry = trace_find_next_entry(iter, NULL,
							       &next_ts);
	unsigned long verbose = (trace_flags & TRACE_ITER_VERBOSE);

	/* Restore the original ent_size */
	iter->ent_size = ent_size;

	if (!next_entry)
		next_ts = iter->ts;

	if (verbose) {
		char comm[TASK_COMM_LEN];

		trace_find_cmdline(entry->pid, comm);

		trace_seq_printf(
			s, "%16s %5d %3d %d %08x %08lx ",
			comm, entry->pid, iter->cpu, entry->flags,
			entry->preempt_count, iter->idx);
	} else {
		lat_print_generic(s, entry, iter->cpu);
	}

	lat_print_timestamp(iter, next_ts);

	return !trace_seq_has_overflowed(s);
}

static const char state_to_char[] = TASK_STATE_TO_CHAR_STR;

static int task_state_char(unsigned long state)
{
	int bit = state ? __ffs(state) + 1 : 0;

	return bit < sizeof(state_to_char) - 1 ? state_to_char[bit] : '?';
}

/**
 * ftrace_find_event - find a registered event
 * @type: the type of event to look for
 *
 * Returns the event of type @type, otherwise NULL.
 * Called with trace_event_read_lock() held.
 */
struct trace_event *ftrace_find_event(int type)
{
	struct trace_event *event;
	unsigned key;

	key = type & (EVENT_HASHSIZE - 1);

	hlist_for_each_entry(event, &event_hash[key], node) {
		if (event->type == type)
			return event;
	}

	return NULL;
}
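/*
 * Example (illustrative): since EVENT_HASHSIZE is a power of 2, masking
 * with (EVENT_HASHSIZE - 1) is a cheap modulo. Type 300 hashes to
 * bucket 300 & 127 == 44, whose short chain is then walked for an
 * exact match on ->type.
 */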
static LIST_HEAD(ftrace_event_list);

static int trace_search_list(struct list_head **list)
{
	struct trace_event *e;
	int last = __TRACE_LAST_TYPE;

	if (list_empty(&ftrace_event_list)) {
		*list = &ftrace_event_list;
		return last + 1;
	}

	/*
	 * We used up all possible max events,
	 * let's see if somebody freed one.
	 */
	list_for_each_entry(e, &ftrace_event_list, list) {
		if (e->type != last + 1)
			break;
		last++;
	}

	/* Did we use up all 65 thousand events? */
	if ((last + 1) > TRACE_EVENT_TYPE_MAX)
		return 0;

	*list = &e->list;
	return last + 1;
}

void trace_event_read_lock(void)
{
	down_read(&trace_event_sem);
}

void trace_event_read_unlock(void)
{
	up_read(&trace_event_sem);
}

/**
 * register_trace_event - register output for an event type
 * @event: the event type to register
 *
 * Event types are stored in a hash and this hash is used to
 * find a way to print an event. If the @event->type is set
 * then it will use that type, otherwise it will assign a
 * type to use.
 *
 * If you assign your own type, please make sure it is added
 * to the trace_type enum in trace.h, to avoid collisions
 * with the dynamic types.
 *
 * Returns the event type number or zero on error.
 */
int register_trace_event(struct trace_event *event)
{
	unsigned key;
	int ret = 0;

	down_write(&trace_event_sem);

	if (WARN_ON(!event))
		goto out;

	if (WARN_ON(!event->funcs))
		goto out;

	INIT_LIST_HEAD(&event->list);

	if (!event->type) {
		struct list_head *list = NULL;

		if (next_event_type > TRACE_EVENT_TYPE_MAX) {

			event->type = trace_search_list(&list);
			if (!event->type)
				goto out;

		} else {

			event->type = next_event_type++;
			list = &ftrace_event_list;
		}

		if (WARN_ON(ftrace_find_event(event->type)))
			goto out;

		list_add_tail(&event->list, list);

	} else if (event->type > __TRACE_LAST_TYPE) {
		printk(KERN_WARNING "Need to add type to trace.h\n");
		WARN_ON(1);
		goto out;
	} else {
		/* Is this event already used? */
		if (ftrace_find_event(event->type))
			goto out;
	}

	if (event->funcs->trace == NULL)
		event->funcs->trace = trace_nop_print;
	if (event->funcs->raw == NULL)
		event->funcs->raw = trace_nop_print;
	if (event->funcs->hex == NULL)
		event->funcs->hex = trace_nop_print;
	if (event->funcs->binary == NULL)
		event->funcs->binary = trace_nop_print;

	key = event->type & (EVENT_HASHSIZE - 1);

	hlist_add_head(&event->node, &event_hash[key]);

	ret = event->type;
 out:
	up_write(&trace_event_sem);

	return ret;
}
EXPORT_SYMBOL_GPL(register_trace_event);
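/*
 * Example (illustrative): a hypothetical caller normally leaves
 * event->type zero and lets the core hand out a dynamic type number:
 *
 *	static struct trace_event_functions my_funcs = {
 *		.trace	= my_trace_output,
 *	};
 *	static struct trace_event my_event = {
 *		.funcs	= &my_funcs,
 *	};
 *
 *	if (!register_trace_event(&my_event))
 *		pr_warn("no free trace event type numbers\n");
 *
 * The unset .raw/.hex/.binary callbacks fall back to trace_nop_print.
 */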
/*
 * Used by module code with the trace_event_sem held for write.
 */
int __unregister_trace_event(struct trace_event *event)
{
	hlist_del(&event->node);
	list_del(&event->list);
	return 0;
}

/**
 * unregister_trace_event - remove a no longer used event
 * @event: the event to remove
 */
int unregister_trace_event(struct trace_event *event)
{
	down_write(&trace_event_sem);
	__unregister_trace_event(event);
	up_write(&trace_event_sem);

	return 0;
}
EXPORT_SYMBOL_GPL(unregister_trace_event);

/*
 * Standard events
 */

enum print_line_t trace_nop_print(struct trace_iterator *iter, int flags,
				  struct trace_event *event)
{
	trace_seq_printf(&iter->seq, "type: %d\n", iter->ent->type);

	return trace_handle_return(&iter->seq);
}

/* TRACE_FN */
static enum print_line_t trace_fn_trace(struct trace_iterator *iter, int flags,
					struct trace_event *event)
{
	struct ftrace_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	seq_print_ip_sym(s, field->ip, flags);

	if ((flags & TRACE_ITER_PRINT_PARENT) && field->parent_ip) {
		trace_seq_puts(s, " <-");
		seq_print_ip_sym(s, field->parent_ip, flags);
	}

	trace_seq_putc(s, '\n');

	return trace_handle_return(s);
}

static enum print_line_t trace_fn_raw(struct trace_iterator *iter, int flags,
				      struct trace_event *event)
{
	struct ftrace_entry *field;

	trace_assign_type(field, iter->ent);

	trace_seq_printf(&iter->seq, "%lx %lx\n",
			 field->ip,
			 field->parent_ip);

	return trace_handle_return(&iter->seq);
}

static enum print_line_t trace_fn_hex(struct trace_iterator *iter, int flags,
				      struct trace_event *event)
{
	struct ftrace_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	SEQ_PUT_HEX_FIELD(s, field->ip);
	SEQ_PUT_HEX_FIELD(s, field->parent_ip);

	return trace_handle_return(s);
}

static enum print_line_t trace_fn_bin(struct trace_iterator *iter, int flags,
				      struct trace_event *event)
{
	struct ftrace_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	SEQ_PUT_FIELD(s, field->ip);
	SEQ_PUT_FIELD(s, field->parent_ip);

	return trace_handle_return(s);
}

static struct trace_event_functions trace_fn_funcs = {
	.trace		= trace_fn_trace,
	.raw		= trace_fn_raw,
	.hex		= trace_fn_hex,
	.binary		= trace_fn_bin,
};

static struct trace_event trace_fn_event = {
	.type		= TRACE_FN,
	.funcs		= &trace_fn_funcs,
};
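/*
 * Example (illustrative): the same TRACE_FN entry, a hypothetical call
 * of bar() from foo(), rendered by each callback above:
 *
 *	.trace:	"bar <-foo"	(with TRACE_ITER_PRINT_PARENT set)
 *	.raw:	"ffffffff81012345 ffffffff81012300"
 *	.hex/.binary: the two address fields as hex digits / raw bytes
 */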
/* TRACE_CTX and TRACE_WAKE */
static enum print_line_t trace_ctxwake_print(struct trace_iterator *iter,
					     char *delim)
{
	struct ctx_switch_entry *field;
	char comm[TASK_COMM_LEN];
	int S, T;

	trace_assign_type(field, iter->ent);

	T = task_state_char(field->next_state);
	S = task_state_char(field->prev_state);
	trace_find_cmdline(field->next_pid, comm);
	trace_seq_printf(&iter->seq,
			 " %5d:%3d:%c %s [%03d] %5d:%3d:%c %s\n",
			 field->prev_pid,
			 field->prev_prio,
			 S, delim,
			 field->next_cpu,
			 field->next_pid,
			 field->next_prio,
			 T, comm);

	return trace_handle_return(&iter->seq);
}

static enum print_line_t trace_ctx_print(struct trace_iterator *iter, int flags,
					 struct trace_event *event)
{
	return trace_ctxwake_print(iter, "==>");
}

static enum print_line_t trace_wake_print(struct trace_iterator *iter,
					  int flags, struct trace_event *event)
{
	return trace_ctxwake_print(iter, "  +");
}

static int trace_ctxwake_raw(struct trace_iterator *iter, char S)
{
	struct ctx_switch_entry *field;
	int T;

	trace_assign_type(field, iter->ent);

	if (!S)
		S = task_state_char(field->prev_state);
	T = task_state_char(field->next_state);
	trace_seq_printf(&iter->seq, "%d %d %c %d %d %d %c\n",
			 field->prev_pid,
			 field->prev_prio,
			 S,
			 field->next_cpu,
			 field->next_pid,
			 field->next_prio,
			 T);

	return trace_handle_return(&iter->seq);
}

static enum print_line_t trace_ctx_raw(struct trace_iterator *iter, int flags,
				       struct trace_event *event)
{
	return trace_ctxwake_raw(iter, 0);
}

static enum print_line_t trace_wake_raw(struct trace_iterator *iter, int flags,
					struct trace_event *event)
{
	return trace_ctxwake_raw(iter, '+');
}

static int trace_ctxwake_hex(struct trace_iterator *iter, char S)
{
	struct ctx_switch_entry *field;
	struct trace_seq *s = &iter->seq;
	int T;

	trace_assign_type(field, iter->ent);

	if (!S)
		S = task_state_char(field->prev_state);
	T = task_state_char(field->next_state);

	SEQ_PUT_HEX_FIELD(s, field->prev_pid);
	SEQ_PUT_HEX_FIELD(s, field->prev_prio);
	SEQ_PUT_HEX_FIELD(s, S);
	SEQ_PUT_HEX_FIELD(s, field->next_cpu);
	SEQ_PUT_HEX_FIELD(s, field->next_pid);
	SEQ_PUT_HEX_FIELD(s, field->next_prio);
	SEQ_PUT_HEX_FIELD(s, T);

	return trace_handle_return(s);
}

static enum print_line_t trace_ctx_hex(struct trace_iterator *iter, int flags,
				       struct trace_event *event)
{
	return trace_ctxwake_hex(iter, 0);
}

static enum print_line_t trace_wake_hex(struct trace_iterator *iter, int flags,
					struct trace_event *event)
{
	return trace_ctxwake_hex(iter, '+');
}

static enum print_line_t trace_ctxwake_bin(struct trace_iterator *iter,
					   int flags, struct trace_event *event)
{
	struct ctx_switch_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	SEQ_PUT_FIELD(s, field->prev_pid);
	SEQ_PUT_FIELD(s, field->prev_prio);
	SEQ_PUT_FIELD(s, field->prev_state);
	SEQ_PUT_FIELD(s, field->next_cpu);
	SEQ_PUT_FIELD(s, field->next_pid);
	SEQ_PUT_FIELD(s, field->next_prio);
	SEQ_PUT_FIELD(s, field->next_state);

	return trace_handle_return(s);
}

static struct trace_event_functions trace_ctx_funcs = {
	.trace		= trace_ctx_print,
	.raw		= trace_ctx_raw,
	.hex		= trace_ctx_hex,
	.binary		= trace_ctxwake_bin,
};

static struct trace_event trace_ctx_event = {
	.type		= TRACE_CTX,
	.funcs		= &trace_ctx_funcs,
};

static struct trace_event_functions trace_wake_funcs = {
	.trace		= trace_wake_print,
	.raw		= trace_wake_raw,
	.hex		= trace_wake_hex,
	.binary		= trace_ctxwake_bin,
};

static struct trace_event trace_wake_event = {
	.type		= TRACE_WAKE,
	.funcs		= &trace_wake_funcs,
};

/* TRACE_STACK */

static enum print_line_t trace_stack_print(struct trace_iterator *iter,
					   int flags, struct trace_event *event)
{
	struct stack_entry *field;
	struct trace_seq *s = &iter->seq;
	unsigned long *p;
	unsigned long *end;

	trace_assign_type(field, iter->ent);
	end = (unsigned long *)((long)iter->ent + iter->ent_size);

	trace_seq_puts(s, "<stack trace>\n");

	for (p = field->caller; p && *p != ULONG_MAX && p < end; p++) {

		if (trace_seq_has_overflowed(s))
			break;

		trace_seq_puts(s, " => ");
		seq_print_ip_sym(s, *p, flags);
		trace_seq_putc(s, '\n');
	}

	return trace_handle_return(s);
}

static struct trace_event_functions trace_stack_funcs = {
	.trace		= trace_stack_print,
};

static struct trace_event trace_stack_event = {
	.type		= TRACE_STACK,
	.funcs		= &trace_stack_funcs,
};
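/*
 * Example (illustrative): a TRACE_STACK entry renders as a header plus
 * one " => " frame per line; with TRACE_ITER_SYM_OFFSET set the frames
 * carry hypothetical offsets such as:
 *
 *	<stack trace>
 *	 => __schedule+0x2c4/0x810
 *	 => schedule+0x3a/0xa0
 */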
/* TRACE_USER_STACK */
static enum print_line_t trace_user_stack_print(struct trace_iterator *iter,
						int flags,
						struct trace_event *event)
{
	struct userstack_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	trace_seq_puts(s, "<user stack trace>\n");
	seq_print_userip_objs(field, s, flags);

	return trace_handle_return(s);
}

static struct trace_event_functions trace_user_stack_funcs = {
	.trace		= trace_user_stack_print,
};

static struct trace_event trace_user_stack_event = {
	.type		= TRACE_USER_STACK,
	.funcs		= &trace_user_stack_funcs,
};

/* TRACE_BPUTS */
static enum print_line_t
trace_bputs_print(struct trace_iterator *iter, int flags,
		  struct trace_event *event)
{
	struct trace_entry *entry = iter->ent;
	struct trace_seq *s = &iter->seq;
	struct bputs_entry *field;

	trace_assign_type(field, entry);

	seq_print_ip_sym(s, field->ip, flags);
	trace_seq_puts(s, ": ");
	trace_seq_puts(s, field->str);

	return trace_handle_return(s);
}

static enum print_line_t
trace_bputs_raw(struct trace_iterator *iter, int flags,
		struct trace_event *event)
{
	struct bputs_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	trace_seq_printf(s, ": %lx : ", field->ip);
	trace_seq_puts(s, field->str);

	return trace_handle_return(s);
}

static struct trace_event_functions trace_bputs_funcs = {
	.trace		= trace_bputs_print,
	.raw		= trace_bputs_raw,
};

static struct trace_event trace_bputs_event = {
	.type		= TRACE_BPUTS,
	.funcs		= &trace_bputs_funcs,
};

/* TRACE_BPRINT */
static enum print_line_t
trace_bprint_print(struct trace_iterator *iter, int flags,
		   struct trace_event *event)
{
	struct trace_entry *entry = iter->ent;
	struct trace_seq *s = &iter->seq;
	struct bprint_entry *field;

	trace_assign_type(field, entry);

	seq_print_ip_sym(s, field->ip, flags);
	trace_seq_puts(s, ": ");
	trace_seq_bprintf(s, field->fmt, field->buf);

	return trace_handle_return(s);
}

static enum print_line_t
trace_bprint_raw(struct trace_iterator *iter, int flags,
		 struct trace_event *event)
{
	struct bprint_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	trace_seq_printf(s, ": %lx : ", field->ip);
	trace_seq_bprintf(s, field->fmt, field->buf);

	return trace_handle_return(s);
}

static struct trace_event_functions trace_bprint_funcs = {
	.trace		= trace_bprint_print,
	.raw		= trace_bprint_raw,
};

static struct trace_event trace_bprint_event = {
	.type		= TRACE_BPRINT,
	.funcs		= &trace_bprint_funcs,
};

/* TRACE_PRINT */
static enum print_line_t trace_print_print(struct trace_iterator *iter,
					   int flags, struct trace_event *event)
{
	struct print_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	seq_print_ip_sym(s, field->ip, flags);
	trace_seq_printf(s, ": %s", field->buf);

	return trace_handle_return(s);
}

static enum print_line_t trace_print_raw(struct trace_iterator *iter, int flags,
					 struct trace_event *event)
{
	struct print_entry *field;

	trace_assign_type(field, iter->ent);

	trace_seq_printf(&iter->seq, "# %lx %s", field->ip, field->buf);

	return trace_handle_return(&iter->seq);
}

static struct trace_event_functions trace_print_funcs = {
	.trace		= trace_print_print,
	.raw		= trace_print_raw,
};

static struct trace_event trace_print_event = {
	.type		= TRACE_PRINT,
	.funcs		= &trace_print_funcs,
};

static struct trace_event *events[] __initdata = {
	&trace_fn_event,
	&trace_ctx_event,
	&trace_wake_event,
	&trace_stack_event,
	&trace_user_stack_event,
	&trace_bputs_event,
	&trace_bprint_event,
	&trace_print_event,
	NULL
};

__init static int init_events(void)
{
	struct trace_event *event;
	int i, ret;

	for (i = 0; events[i]; i++) {
		event = events[i];

		ret = register_trace_event(event);
		if (!ret) {
			printk(KERN_WARNING "event %d failed to register\n",
			       event->type);
			WARN_ON_ONCE(1);
		}
	}

	return 0;
}
early_initcall(init_events);