/*
 * trace_output.c
 *
 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
 *
 */

#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/ftrace.h>

#include "trace_output.h"

/* must be a power of 2 */
#define EVENT_HASHSIZE	128

DECLARE_RWSEM(trace_event_sem);

static struct hlist_head event_hash[EVENT_HASHSIZE] __read_mostly;

static int next_event_type = __TRACE_LAST_TYPE + 1;

enum print_line_t trace_print_bputs_msg_only(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry = iter->ent;
	struct bputs_entry *field;

	trace_assign_type(field, entry);

	trace_seq_puts(s, field->str);

	return trace_handle_return(s);
}

enum print_line_t trace_print_bprintk_msg_only(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry = iter->ent;
	struct bprint_entry *field;

	trace_assign_type(field, entry);

	trace_seq_bprintf(s, field->fmt, field->buf);

	return trace_handle_return(s);
}

enum print_line_t trace_print_printk_msg_only(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry = iter->ent;
	struct print_entry *field;

	trace_assign_type(field, entry);

	trace_seq_puts(s, field->buf);

	return trace_handle_return(s);
}

const char *
trace_print_flags_seq(struct trace_seq *p, const char *delim,
		      unsigned long flags,
		      const struct trace_print_flags *flag_array)
{
	unsigned long mask;
	const char *str;
	const char *ret = trace_seq_buffer_ptr(p);
	int i, first = 1;

	for (i = 0; flag_array[i].name && flags; i++) {

		mask = flag_array[i].mask;
		if ((flags & mask) != mask)
			continue;

		str = flag_array[i].name;
		flags &= ~mask;
		if (!first && delim)
			trace_seq_puts(p, delim);
		else
			first = 0;
		trace_seq_puts(p, str);
	}

	/* check for left over flags */
	if (flags) {
		if (!first && delim)
			trace_seq_puts(p, delim);
		trace_seq_printf(p, "0x%lx", flags);
	}

	trace_seq_putc(p, 0);

	return ret;
}
EXPORT_SYMBOL(trace_print_flags_seq);

const char *
trace_print_symbols_seq(struct trace_seq *p, unsigned long val,
			const struct trace_print_flags *symbol_array)
{
	int i;
	const char *ret = trace_seq_buffer_ptr(p);

	for (i = 0; symbol_array[i].name; i++) {

		if (val != symbol_array[i].mask)
			continue;

		trace_seq_puts(p, symbol_array[i].name);
		break;
	}

	if (ret == (const char *)(trace_seq_buffer_ptr(p)))
		trace_seq_printf(p, "0x%lx", val);

	trace_seq_putc(p, 0);

	return ret;
}
EXPORT_SYMBOL(trace_print_symbols_seq);

#if BITS_PER_LONG == 32
const char *
trace_print_symbols_seq_u64(struct trace_seq *p, unsigned long long val,
			    const struct trace_print_flags_u64 *symbol_array)
{
	int i;
	const char *ret = trace_seq_buffer_ptr(p);

	for (i = 0; symbol_array[i].name; i++) {

		if (val != symbol_array[i].mask)
			continue;

		trace_seq_puts(p, symbol_array[i].name);
		break;
	}

	if (ret == (const char *)(trace_seq_buffer_ptr(p)))
		trace_seq_printf(p, "0x%llx", val);

	trace_seq_putc(p, 0);

	return ret;
}
EXPORT_SYMBOL(trace_print_symbols_seq_u64);
#endif

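/*
 * Example (sketch, not part of the build): the flags/symbols helpers
 * above are normally reached through the __print_flags() and
 * __print_symbolic() macros used in a TP_printk() format, e.g.
 *
 *	TP_printk("flags=%s",
 *		  __print_flags(__entry->flags, "|",
 *				{ (1UL << 0), "LOCKED" },
 *				{ (1UL << 1), "DIRTY" }))
 *
 * which expands into a trace_print_flags_seq() call against
 * iter->tmp_seq. The flag bits and names here are made up purely for
 * illustration.
 */
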
const char *
trace_print_bitmask_seq(struct trace_seq *p, void *bitmask_ptr,
			unsigned int bitmask_size)
{
	const char *ret = trace_seq_buffer_ptr(p);

	trace_seq_bitmask(p, bitmask_ptr, bitmask_size * 8);
	trace_seq_putc(p, 0);

	return ret;
}
EXPORT_SYMBOL_GPL(trace_print_bitmask_seq);

const char *
trace_print_hex_seq(struct trace_seq *p, const unsigned char *buf, int buf_len)
{
	int i;
	const char *ret = trace_seq_buffer_ptr(p);

	for (i = 0; i < buf_len; i++)
		trace_seq_printf(p, "%s%2.2x", i == 0 ? "" : " ", buf[i]);

	trace_seq_putc(p, 0);

	return ret;
}
EXPORT_SYMBOL(trace_print_hex_seq);

const char *
trace_print_array_seq(struct trace_seq *p, const void *buf, int count,
		      size_t el_size)
{
	const char *ret = trace_seq_buffer_ptr(p);
	const char *prefix = "";
	void *ptr = (void *)buf;
	size_t buf_len = count * el_size;

	trace_seq_putc(p, '{');

	while (ptr < buf + buf_len) {
		switch (el_size) {
		case 1:
			trace_seq_printf(p, "%s0x%x", prefix,
					 *(u8 *)ptr);
			break;
		case 2:
			trace_seq_printf(p, "%s0x%x", prefix,
					 *(u16 *)ptr);
			break;
		case 4:
			trace_seq_printf(p, "%s0x%x", prefix,
					 *(u32 *)ptr);
			break;
		case 8:
			trace_seq_printf(p, "%s0x%llx", prefix,
					 *(u64 *)ptr);
			break;
		default:
			trace_seq_printf(p, "BAD SIZE:%zu 0x%x", el_size,
					 *(u8 *)ptr);
			el_size = 1;
		}
		prefix = ",";
		ptr += el_size;
	}

	trace_seq_putc(p, '}');
	trace_seq_putc(p, 0);

	return ret;
}
EXPORT_SYMBOL(trace_print_array_seq);

int trace_raw_output_prep(struct trace_iterator *iter,
			  struct trace_event *trace_event)
{
	struct trace_event_call *event;
	struct trace_seq *s = &iter->seq;
	struct trace_seq *p = &iter->tmp_seq;
	struct trace_entry *entry;

	event = container_of(trace_event, struct trace_event_call, event);
	entry = iter->ent;

	if (entry->type != event->event.type) {
		WARN_ON_ONCE(1);
		return TRACE_TYPE_UNHANDLED;
	}

	trace_seq_init(p);
	trace_seq_printf(s, "%s: ", trace_event_name(event));

	return trace_handle_return(s);
}
EXPORT_SYMBOL(trace_raw_output_prep);

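/*
 * Example (sketch): a typical trace_event output callback calls
 * trace_raw_output_prep() to emit "<event name>: " and then appends its
 * own fields. The callback name below is hypothetical; code generated by
 * the TRACE_EVENT() macros follows the same pattern:
 *
 *	static enum print_line_t
 *	my_event_output(struct trace_iterator *iter, int flags,
 *			struct trace_event *trace_event)
 *	{
 *		enum print_line_t ret;
 *
 *		ret = trace_raw_output_prep(iter, trace_event);
 *		if (ret != TRACE_TYPE_HANDLED)
 *			return ret;
 *
 *		trace_seq_printf(&iter->seq, "val=%d\n", 0);
 *		return trace_handle_return(&iter->seq);
 *	}
 */
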
static int trace_output_raw(struct trace_iterator *iter, char *name,
			    char *fmt, va_list ap)
{
	struct trace_seq *s = &iter->seq;

	trace_seq_printf(s, "%s: ", name);
	trace_seq_vprintf(s, fmt, ap);

	return trace_handle_return(s);
}

int trace_output_call(struct trace_iterator *iter, char *name, char *fmt, ...)
{
	va_list ap;
	int ret;

	va_start(ap, fmt);
	ret = trace_output_raw(iter, name, fmt, ap);
	va_end(ap);

	return ret;
}
EXPORT_SYMBOL_GPL(trace_output_call);

#ifdef CONFIG_KRETPROBES
static inline const char *kretprobed(const char *name)
{
	static const char tramp_name[] = "kretprobe_trampoline";
	int size = sizeof(tramp_name);

	if (strncmp(tramp_name, name, size) == 0)
		return "[unknown/kretprobe'd]";
	return name;
}
#else
static inline const char *kretprobed(const char *name)
{
	return name;
}
#endif /* CONFIG_KRETPROBES */

static void
seq_print_sym_short(struct trace_seq *s, const char *fmt, unsigned long address)
{
#ifdef CONFIG_KALLSYMS
	char str[KSYM_SYMBOL_LEN];
	const char *name;

	kallsyms_lookup(address, NULL, NULL, NULL, str);

	name = kretprobed(str);

	trace_seq_printf(s, fmt, name);
#endif
}

static void
seq_print_sym_offset(struct trace_seq *s, const char *fmt,
		     unsigned long address)
{
#ifdef CONFIG_KALLSYMS
	char str[KSYM_SYMBOL_LEN];
	const char *name;

	sprint_symbol(str, address);
	name = kretprobed(str);

	trace_seq_printf(s, fmt, name);
#endif
}

#ifndef CONFIG_64BIT
# define IP_FMT "%08lx"
#else
# define IP_FMT "%016lx"
#endif

static int seq_print_user_ip(struct trace_seq *s, struct mm_struct *mm,
			     unsigned long ip, unsigned long sym_flags)
{
	struct file *file = NULL;
	unsigned long vmstart = 0;
	int ret = 1;

	if (s->full)
		return 0;

	if (mm) {
		const struct vm_area_struct *vma;

		down_read(&mm->mmap_sem);
		vma = find_vma(mm, ip);
		if (vma) {
			file = vma->vm_file;
			vmstart = vma->vm_start;
		}
		if (file) {
			ret = trace_seq_path(s, &file->f_path);
			if (ret)
				trace_seq_printf(s, "[+0x%lx]",
						 ip - vmstart);
		}
		up_read(&mm->mmap_sem);
	}
	if (ret && ((sym_flags & TRACE_ITER_SYM_ADDR) || !file))
		trace_seq_printf(s, " <" IP_FMT ">", ip);
	return !trace_seq_has_overflowed(s);
}

int
seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags)
{
	if (!ip) {
		trace_seq_putc(s, '0');
		goto out;
	}

	if (sym_flags & TRACE_ITER_SYM_OFFSET)
		seq_print_sym_offset(s, "%s", ip);
	else
		seq_print_sym_short(s, "%s", ip);

	if (sym_flags & TRACE_ITER_SYM_ADDR)
		trace_seq_printf(s, " <" IP_FMT ">", ip);

out:
	return !trace_seq_has_overflowed(s);
}

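/*
 * Example outputs of seq_print_ip_sym() (illustrative; actual symbols
 * depend on the kernel image):
 *
 *	default:               "schedule"
 *	TRACE_ITER_SYM_OFFSET: "schedule+0x32/0x90"
 *	TRACE_ITER_SYM_ADDR:   appends " <ffffffff81a0b5f2>"
 *
 * Without CONFIG_KALLSYMS the symbolic part is empty, so only the
 * <address> form appears when TRACE_ITER_SYM_ADDR is set.
 */
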
/**
 * trace_print_lat_fmt - print the irq, preempt and need-resched fields
 * @s: trace seq struct to write to
 * @entry: The trace entry field from the ring buffer
 *
 * Prints the generic latency fields: interrupts disabled, need-resched
 * state, hard/soft interrupt (and NMI) context, and the preempt count.
 */
int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
{
	char hardsoft_irq;
	char need_resched;
	char irqs_off;
	int hardirq;
	int softirq;
	int nmi;

	nmi = entry->flags & TRACE_FLAG_NMI;
	hardirq = entry->flags & TRACE_FLAG_HARDIRQ;
	softirq = entry->flags & TRACE_FLAG_SOFTIRQ;

	irqs_off =
		(entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' :
		(entry->flags & TRACE_FLAG_IRQS_NOSUPPORT) ? 'X' :
		'.';

	switch (entry->flags & (TRACE_FLAG_NEED_RESCHED |
				TRACE_FLAG_PREEMPT_RESCHED)) {
	case TRACE_FLAG_NEED_RESCHED | TRACE_FLAG_PREEMPT_RESCHED:
		need_resched = 'N';
		break;
	case TRACE_FLAG_NEED_RESCHED:
		need_resched = 'n';
		break;
	case TRACE_FLAG_PREEMPT_RESCHED:
		need_resched = 'p';
		break;
	default:
		need_resched = '.';
		break;
	}

	hardsoft_irq =
		(nmi && hardirq)     ? 'Z' :
		nmi                  ? 'z' :
		(hardirq && softirq) ? 'H' :
		hardirq              ? 'h' :
		softirq              ? 's' :
				       '.' ;

	trace_seq_printf(s, "%c%c%c",
			 irqs_off, need_resched, hardsoft_irq);

	if (entry->preempt_count)
		trace_seq_printf(s, "%x", entry->preempt_count);
	else
		trace_seq_putc(s, '.');

	return !trace_seq_has_overflowed(s);
}

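/*
 * Example (illustrative): trace_print_lat_fmt() produces the familiar
 * four character latency field, e.g. "dNs1" means interrupts disabled,
 * both need-resched flags set, running in softirq context, with a
 * preempt_count of 1. A '.' in any position means the corresponding
 * state is not set (or the count is zero).
 */
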
static int
lat_print_generic(struct trace_seq *s, struct trace_entry *entry, int cpu)
{
	char comm[TASK_COMM_LEN];

	trace_find_cmdline(entry->pid, comm);

	trace_seq_printf(s, "%8.8s-%-5d %3d",
			 comm, entry->pid, cpu);

	return trace_print_lat_fmt(s, entry);
}

#undef MARK
#define MARK(v, s) {.val = v, .sym = s}
/* trace overhead mark */
static const struct trace_mark {
	unsigned long long	val; /* unit: nsec */
	char			sym;
} mark[] = {
	MARK(1000000000ULL	, '$'), /* 1 sec */
	MARK(100000000ULL	, '@'), /* 100 msec */
	MARK(10000000ULL	, '*'), /* 10 msec */
	MARK(1000000ULL		, '#'), /* 1000 usecs */
	MARK(100000ULL		, '!'), /* 100 usecs */
	MARK(10000ULL		, '+'), /* 10 usecs */
};
#undef MARK

char trace_find_mark(unsigned long long d)
{
	int i;
	int size = ARRAY_SIZE(mark);

	for (i = 0; i < size; i++) {
		if (d > mark[i].val)
			break;
	}

	return (i == size) ? ' ' : mark[i].sym;
}

static int
lat_print_timestamp(struct trace_iterator *iter, u64 next_ts)
{
	struct trace_array *tr = iter->tr;
	unsigned long verbose = tr->trace_flags & TRACE_ITER_VERBOSE;
	unsigned long in_ns = iter->iter_flags & TRACE_FILE_TIME_IN_NS;
	unsigned long long abs_ts = iter->ts - iter->trace_buffer->time_start;
	unsigned long long rel_ts = next_ts - iter->ts;
	struct trace_seq *s = &iter->seq;

	if (in_ns) {
		abs_ts = ns2usecs(abs_ts);
		rel_ts = ns2usecs(rel_ts);
	}

	if (verbose && in_ns) {
		unsigned long abs_usec = do_div(abs_ts, USEC_PER_MSEC);
		unsigned long abs_msec = (unsigned long)abs_ts;
		unsigned long rel_usec = do_div(rel_ts, USEC_PER_MSEC);
		unsigned long rel_msec = (unsigned long)rel_ts;

		trace_seq_printf(
			s, "[%08llx] %ld.%03ldms (+%ld.%03ldms): ",
			ns2usecs(iter->ts),
			abs_msec, abs_usec,
			rel_msec, rel_usec);

	} else if (verbose && !in_ns) {
		trace_seq_printf(
			s, "[%016llx] %lld (+%lld): ",
			iter->ts, abs_ts, rel_ts);

	} else if (!verbose && in_ns) {
		trace_seq_printf(
			s, " %4lldus%c: ",
			abs_ts,
			trace_find_mark(rel_ts * NSEC_PER_USEC));

	} else { /* !verbose && !in_ns */
		trace_seq_printf(s, " %4lld: ", abs_ts);
	}

	return !trace_seq_has_overflowed(s);
}

int trace_print_context(struct trace_iterator *iter)
{
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry = iter->ent;
	unsigned long long t;
	unsigned long secs, usec_rem;
	char comm[TASK_COMM_LEN];

	trace_find_cmdline(entry->pid, comm);

	trace_seq_printf(s, "%16s-%-5d [%03d] ",
			 comm, entry->pid, iter->cpu);

	if (tr->trace_flags & TRACE_ITER_IRQ_INFO)
		trace_print_lat_fmt(s, entry);

	if (iter->iter_flags & TRACE_FILE_TIME_IN_NS) {
		t = ns2usecs(iter->ts);
		usec_rem = do_div(t, USEC_PER_SEC);
		secs = (unsigned long)t;
		trace_seq_printf(s, " %5lu.%06lu: ", secs, usec_rem);
	} else
		trace_seq_printf(s, " %12llu: ", iter->ts);

	return !trace_seq_has_overflowed(s);
}

int trace_print_lat_context(struct trace_iterator *iter)
{
	struct trace_array *tr = iter->tr;
	/* trace_find_next_entry() will reset ent_size */
	int ent_size = iter->ent_size;
	struct trace_seq *s = &iter->seq;
	u64 next_ts;
	struct trace_entry *entry = iter->ent,
			   *next_entry = trace_find_next_entry(iter, NULL,
							       &next_ts);
	unsigned long verbose = (tr->trace_flags & TRACE_ITER_VERBOSE);

	/* Restore the original ent_size */
	iter->ent_size = ent_size;

	if (!next_entry)
		next_ts = iter->ts;

	if (verbose) {
		char comm[TASK_COMM_LEN];

		trace_find_cmdline(entry->pid, comm);

		trace_seq_printf(
			s, "%16s %5d %3d %d %08x %08lx ",
			comm, entry->pid, iter->cpu, entry->flags,
			entry->preempt_count, iter->idx);
	} else {
		lat_print_generic(s, entry, iter->cpu);
	}

	lat_print_timestamp(iter, next_ts);

	return !trace_seq_has_overflowed(s);
}

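/*
 * Example (illustrative): with TRACE_ITER_IRQ_INFO set,
 * trace_print_context() above emits the standard trace line prefix:
 *
 *	            bash-1234  [002] d..1  1234.567890:
 *
 * i.e. "comm-pid [cpu] <latency flags> seconds.microseconds: ".
 */
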
static const char state_to_char[] = TASK_STATE_TO_CHAR_STR;

static int task_state_char(unsigned long state)
{
	int bit = state ? __ffs(state) + 1 : 0;

	return bit < sizeof(state_to_char) - 1 ? state_to_char[bit] : '?';
}

/**
 * ftrace_find_event - find a registered event
 * @type: the type of event to look for
 *
 * Returns an event of type @type otherwise NULL
 * Called with trace_event_read_lock() held.
 */
struct trace_event *ftrace_find_event(int type)
{
	struct trace_event *event;
	unsigned key;

	key = type & (EVENT_HASHSIZE - 1);

	hlist_for_each_entry(event, &event_hash[key], node) {
		if (event->type == type)
			return event;
	}

	return NULL;
}

static LIST_HEAD(ftrace_event_list);

static int trace_search_list(struct list_head **list)
{
	struct trace_event *e;
	int last = __TRACE_LAST_TYPE;

	if (list_empty(&ftrace_event_list)) {
		*list = &ftrace_event_list;
		return last + 1;
	}

	/*
	 * We used up all possible max events,
	 * let's see if somebody freed one.
	 */
	list_for_each_entry(e, &ftrace_event_list, list) {
		if (e->type != last + 1)
			break;
		last++;
	}

	/* Did we use up all 65 thousand events??? */
	if ((last + 1) > TRACE_EVENT_TYPE_MAX)
		return 0;

	*list = &e->list;
	return last + 1;
}

void trace_event_read_lock(void)
{
	down_read(&trace_event_sem);
}

void trace_event_read_unlock(void)
{
	up_read(&trace_event_sem);
}

/**
 * register_trace_event - register output for an event type
 * @event: the event type to register
 *
 * Event types are stored in a hash and this hash is used to
 * find a way to print an event. If the @event->type is set
 * then it will use that type, otherwise it will assign a
 * type to use.
 *
 * If you assign your own type, please make sure it is added
 * to the trace_type enum in trace.h, to avoid collisions
 * with the dynamic types.
 *
 * Returns the event type number or zero on error.
 */
int register_trace_event(struct trace_event *event)
{
	unsigned key;
	int ret = 0;

	down_write(&trace_event_sem);

	if (WARN_ON(!event))
		goto out;

	if (WARN_ON(!event->funcs))
		goto out;

	INIT_LIST_HEAD(&event->list);

	if (!event->type) {
		struct list_head *list = NULL;

		if (next_event_type > TRACE_EVENT_TYPE_MAX) {

			event->type = trace_search_list(&list);
			if (!event->type)
				goto out;

		} else {

			event->type = next_event_type++;
			list = &ftrace_event_list;
		}

		if (WARN_ON(ftrace_find_event(event->type)))
			goto out;

		list_add_tail(&event->list, list);

	} else if (event->type > __TRACE_LAST_TYPE) {
		printk(KERN_WARNING "Need to add type to trace.h\n");
		WARN_ON(1);
		goto out;
	} else {
		/* Is this event already used? */
		if (ftrace_find_event(event->type))
			goto out;
	}

	if (event->funcs->trace == NULL)
		event->funcs->trace = trace_nop_print;
	if (event->funcs->raw == NULL)
		event->funcs->raw = trace_nop_print;
	if (event->funcs->hex == NULL)
		event->funcs->hex = trace_nop_print;
	if (event->funcs->binary == NULL)
		event->funcs->binary = trace_nop_print;

	key = event->type & (EVENT_HASHSIZE - 1);

	hlist_add_head(&event->node, &event_hash[key]);

	ret = event->type;
 out:
	up_write(&trace_event_sem);

	return ret;
}
EXPORT_SYMBOL_GPL(register_trace_event);

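/*
 * Example (sketch): registering a custom output handler. The names are
 * hypothetical; with .type left at zero a free type number is picked and
 * returned (zero means the registration failed):
 *
 *	static struct trace_event_functions my_funcs = {
 *		.trace	= my_trace_output,
 *	};
 *	static struct trace_event my_event = {
 *		.funcs	= &my_funcs,
 *	};
 *
 *	int type = register_trace_event(&my_event);
 *
 *	if (!type)
 *		pr_warn("trace event registration failed\n");
 */
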
/*
 * Used by module code with the trace_event_sem held for write.
 */
int __unregister_trace_event(struct trace_event *event)
{
	hlist_del(&event->node);
	list_del(&event->list);
	return 0;
}

/**
 * unregister_trace_event - remove a no longer used event
 * @event: the event to remove
 */
int unregister_trace_event(struct trace_event *event)
{
	down_write(&trace_event_sem);
	__unregister_trace_event(event);
	up_write(&trace_event_sem);

	return 0;
}
EXPORT_SYMBOL_GPL(unregister_trace_event);

/*
 * Standard events
 */

enum print_line_t trace_nop_print(struct trace_iterator *iter, int flags,
				  struct trace_event *event)
{
	trace_seq_printf(&iter->seq, "type: %d\n", iter->ent->type);

	return trace_handle_return(&iter->seq);
}

/* TRACE_FN */
static enum print_line_t trace_fn_trace(struct trace_iterator *iter, int flags,
					struct trace_event *event)
{
	struct ftrace_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	seq_print_ip_sym(s, field->ip, flags);

	if ((flags & TRACE_ITER_PRINT_PARENT) && field->parent_ip) {
		trace_seq_puts(s, " <-");
		seq_print_ip_sym(s, field->parent_ip, flags);
	}

	trace_seq_putc(s, '\n');

	return trace_handle_return(s);
}

static enum print_line_t trace_fn_raw(struct trace_iterator *iter, int flags,
				      struct trace_event *event)
{
	struct ftrace_entry *field;

	trace_assign_type(field, iter->ent);

	trace_seq_printf(&iter->seq, "%lx %lx\n",
			 field->ip,
			 field->parent_ip);

	return trace_handle_return(&iter->seq);
}

static enum print_line_t trace_fn_hex(struct trace_iterator *iter, int flags,
				      struct trace_event *event)
{
	struct ftrace_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	SEQ_PUT_HEX_FIELD(s, field->ip);
	SEQ_PUT_HEX_FIELD(s, field->parent_ip);

	return trace_handle_return(s);
}

static enum print_line_t trace_fn_bin(struct trace_iterator *iter, int flags,
				      struct trace_event *event)
{
	struct ftrace_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	SEQ_PUT_FIELD(s, field->ip);
	SEQ_PUT_FIELD(s, field->parent_ip);

	return trace_handle_return(s);
}

static struct trace_event_functions trace_fn_funcs = {
	.trace		= trace_fn_trace,
	.raw		= trace_fn_raw,
	.hex		= trace_fn_hex,
	.binary		= trace_fn_bin,
};

static struct trace_event trace_fn_event = {
	.type		= TRACE_FN,
	.funcs		= &trace_fn_funcs,
};

/* TRACE_CTX and TRACE_WAKE */
static enum print_line_t trace_ctxwake_print(struct trace_iterator *iter,
					     char *delim)
{
	struct ctx_switch_entry *field;
	char comm[TASK_COMM_LEN];
	int S, T;

	trace_assign_type(field, iter->ent);

	T = task_state_char(field->next_state);
	S = task_state_char(field->prev_state);
	trace_find_cmdline(field->next_pid, comm);
	trace_seq_printf(&iter->seq,
			 " %5d:%3d:%c %s [%03d] %5d:%3d:%c %s\n",
			 field->prev_pid,
			 field->prev_prio,
			 S, delim,
			 field->next_cpu,
			 field->next_pid,
			 field->next_prio,
			 T, comm);

	return trace_handle_return(&iter->seq);
}

static enum print_line_t trace_ctx_print(struct trace_iterator *iter, int flags,
					 struct trace_event *event)
{
	return trace_ctxwake_print(iter, "==>");
}

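/*
 * Example (illustrative): a context switch line printed by
 * trace_ctxwake_print() looks like
 *
 *	  1234:120:S ==> [002]  5678:120:R bash
 *
 * i.e. prev_pid:prev_prio:prev_state, the delimiter ("==>" for a
 * switch, "  +" for a wakeup), then [next_cpu]
 * next_pid:next_prio:next_state and the next task's comm.
 */
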
static enum print_line_t trace_wake_print(struct trace_iterator *iter,
					  int flags, struct trace_event *event)
{
	return trace_ctxwake_print(iter, "  +");
}

static int trace_ctxwake_raw(struct trace_iterator *iter, char S)
{
	struct ctx_switch_entry *field;
	int T;

	trace_assign_type(field, iter->ent);

	if (!S)
		S = task_state_char(field->prev_state);
	T = task_state_char(field->next_state);
	trace_seq_printf(&iter->seq, "%d %d %c %d %d %d %c\n",
			 field->prev_pid,
			 field->prev_prio,
			 S,
			 field->next_cpu,
			 field->next_pid,
			 field->next_prio,
			 T);

	return trace_handle_return(&iter->seq);
}

static enum print_line_t trace_ctx_raw(struct trace_iterator *iter, int flags,
				       struct trace_event *event)
{
	return trace_ctxwake_raw(iter, 0);
}

static enum print_line_t trace_wake_raw(struct trace_iterator *iter, int flags,
					struct trace_event *event)
{
	return trace_ctxwake_raw(iter, '+');
}

static int trace_ctxwake_hex(struct trace_iterator *iter, char S)
{
	struct ctx_switch_entry *field;
	struct trace_seq *s = &iter->seq;
	int T;

	trace_assign_type(field, iter->ent);

	if (!S)
		S = task_state_char(field->prev_state);
	T = task_state_char(field->next_state);

	SEQ_PUT_HEX_FIELD(s, field->prev_pid);
	SEQ_PUT_HEX_FIELD(s, field->prev_prio);
	SEQ_PUT_HEX_FIELD(s, S);
	SEQ_PUT_HEX_FIELD(s, field->next_cpu);
	SEQ_PUT_HEX_FIELD(s, field->next_pid);
	SEQ_PUT_HEX_FIELD(s, field->next_prio);
	SEQ_PUT_HEX_FIELD(s, T);

	return trace_handle_return(s);
}

static enum print_line_t trace_ctx_hex(struct trace_iterator *iter, int flags,
				       struct trace_event *event)
{
	return trace_ctxwake_hex(iter, 0);
}

static enum print_line_t trace_wake_hex(struct trace_iterator *iter, int flags,
					struct trace_event *event)
{
	return trace_ctxwake_hex(iter, '+');
}

static enum print_line_t trace_ctxwake_bin(struct trace_iterator *iter,
					   int flags, struct trace_event *event)
{
	struct ctx_switch_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	SEQ_PUT_FIELD(s, field->prev_pid);
	SEQ_PUT_FIELD(s, field->prev_prio);
	SEQ_PUT_FIELD(s, field->prev_state);
	SEQ_PUT_FIELD(s, field->next_cpu);
	SEQ_PUT_FIELD(s, field->next_pid);
	SEQ_PUT_FIELD(s, field->next_prio);
	SEQ_PUT_FIELD(s, field->next_state);

	return trace_handle_return(s);
}

static struct trace_event_functions trace_ctx_funcs = {
	.trace		= trace_ctx_print,
	.raw		= trace_ctx_raw,
	.hex		= trace_ctx_hex,
	.binary		= trace_ctxwake_bin,
};

static struct trace_event trace_ctx_event = {
	.type		= TRACE_CTX,
	.funcs		= &trace_ctx_funcs,
};

static struct trace_event_functions trace_wake_funcs = {
	.trace		= trace_wake_print,
	.raw		= trace_wake_raw,
	.hex		= trace_wake_hex,
	.binary		= trace_ctxwake_bin,
};

static struct trace_event trace_wake_event = {
	.type		= TRACE_WAKE,
	.funcs		= &trace_wake_funcs,
};

/* TRACE_STACK */

static enum print_line_t trace_stack_print(struct trace_iterator *iter,
					   int flags, struct trace_event *event)
{
	struct stack_entry *field;
	struct trace_seq *s = &iter->seq;
	unsigned long *p;
	unsigned long *end;

	trace_assign_type(field, iter->ent);
	end = (unsigned long *)((long)iter->ent + iter->ent_size);

	trace_seq_puts(s, "<stack trace>\n");

	for (p = field->caller; p && *p != ULONG_MAX && p < end; p++) {

		if (trace_seq_has_overflowed(s))
			break;

		trace_seq_puts(s, " => ");
		seq_print_ip_sym(s, *p, flags);
		trace_seq_putc(s, '\n');
	}

	return trace_handle_return(s);
}

static struct trace_event_functions trace_stack_funcs = {
	.trace		= trace_stack_print,
};

static struct trace_event trace_stack_event = {
	.type		= TRACE_STACK,
	.funcs		= &trace_stack_funcs,
};

/* TRACE_USER_STACK */
static enum print_line_t trace_user_stack_print(struct trace_iterator *iter,
						int flags, struct trace_event *event)
{
	struct trace_array *tr = iter->tr;
	struct userstack_entry *field;
	struct trace_seq *s = &iter->seq;
	struct mm_struct *mm = NULL;
	unsigned int i;

	trace_assign_type(field, iter->ent);

	trace_seq_puts(s, "<user stack trace>\n");

	if (tr->trace_flags & TRACE_ITER_SYM_USEROBJ) {
		struct task_struct *task;
		/*
		 * we do the lookup on the thread group leader,
		 * since individual threads might have already quit!
		 */
		rcu_read_lock();
		task = find_task_by_vpid(field->tgid);
		if (task)
			mm = get_task_mm(task);
		rcu_read_unlock();
	}

	for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
		unsigned long ip = field->caller[i];

		if (ip == ULONG_MAX || trace_seq_has_overflowed(s))
			break;

		trace_seq_puts(s, " => ");

		if (!ip) {
			trace_seq_puts(s, "??");
			trace_seq_putc(s, '\n');
			continue;
		}

		seq_print_user_ip(s, mm, ip, flags);
		trace_seq_putc(s, '\n');
	}

	if (mm)
		mmput(mm);

	return trace_handle_return(s);
}

static struct trace_event_functions trace_user_stack_funcs = {
	.trace		= trace_user_stack_print,
};

static struct trace_event trace_user_stack_event = {
	.type		= TRACE_USER_STACK,
	.funcs		= &trace_user_stack_funcs,
};

/* TRACE_HWLAT */
static enum print_line_t
trace_hwlat_print(struct trace_iterator *iter, int flags,
		  struct trace_event *event)
{
	struct trace_entry *entry = iter->ent;
	struct trace_seq *s = &iter->seq;
	struct hwlat_entry *field;

	trace_assign_type(field, entry);

	trace_seq_printf(s, "#%-5u inner/outer(us): %4llu/%-5llu ts:%ld.%09ld",
			 field->seqnum,
			 field->duration,
			 field->outer_duration,
			 field->timestamp.tv_sec,
			 field->timestamp.tv_nsec);

	if (field->nmi_count) {
		/*
		 * The generic sched_clock() is not NMI safe, thus
		 * we only record the count and not the time.
		 */
		if (!IS_ENABLED(CONFIG_GENERIC_SCHED_CLOCK))
			trace_seq_printf(s, " nmi-total:%llu",
					 field->nmi_total_ts);
		trace_seq_printf(s, " nmi-count:%u",
				 field->nmi_count);
	}

	trace_seq_putc(s, '\n');

	return trace_handle_return(s);
}

static enum print_line_t
trace_hwlat_raw(struct trace_iterator *iter, int flags,
		struct trace_event *event)
{
	struct hwlat_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	trace_seq_printf(s, "%llu %lld %ld %09ld %u\n",
			 field->duration,
			 field->outer_duration,
			 field->timestamp.tv_sec,
			 field->timestamp.tv_nsec,
			 field->seqnum);

	return trace_handle_return(s);
}

static struct trace_event_functions trace_hwlat_funcs = {
	.trace		= trace_hwlat_print,
	.raw		= trace_hwlat_raw,
};

static struct trace_event trace_hwlat_event = {
	.type		= TRACE_HWLAT,
	.funcs		= &trace_hwlat_funcs,
};

/* TRACE_BPUTS */
static enum print_line_t
trace_bputs_print(struct trace_iterator *iter, int flags,
		  struct trace_event *event)
{
	struct trace_entry *entry = iter->ent;
	struct trace_seq *s = &iter->seq;
	struct bputs_entry *field;

	trace_assign_type(field, entry);

	seq_print_ip_sym(s, field->ip, flags);
	trace_seq_puts(s, ": ");
	trace_seq_puts(s, field->str);

	return trace_handle_return(s);
}

static enum print_line_t
trace_bputs_raw(struct trace_iterator *iter, int flags,
		struct trace_event *event)
{
	struct bputs_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	trace_seq_printf(s, ": %lx : ", field->ip);
	trace_seq_puts(s, field->str);

	return trace_handle_return(s);
}

static struct trace_event_functions trace_bputs_funcs = {
	.trace		= trace_bputs_print,
	.raw		= trace_bputs_raw,
};

static struct trace_event trace_bputs_event = {
	.type		= TRACE_BPUTS,
	.funcs		= &trace_bputs_funcs,
};

/* TRACE_BPRINT */
static enum print_line_t
trace_bprint_print(struct trace_iterator *iter, int flags,
		   struct trace_event *event)
{
	struct trace_entry *entry = iter->ent;
	struct trace_seq *s = &iter->seq;
	struct bprint_entry *field;

	trace_assign_type(field, entry);

	seq_print_ip_sym(s, field->ip, flags);
	trace_seq_puts(s, ": ");
	trace_seq_bprintf(s, field->fmt, field->buf);

	return trace_handle_return(s);
}

static enum print_line_t
trace_bprint_raw(struct trace_iterator *iter, int flags,
		 struct trace_event *event)
{
	struct bprint_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	trace_seq_printf(s, ": %lx : ", field->ip);
	trace_seq_bprintf(s, field->fmt, field->buf);

	return trace_handle_return(s);
}

static struct trace_event_functions trace_bprint_funcs = {
	.trace		= trace_bprint_print,
	.raw		= trace_bprint_raw,
};

static struct trace_event trace_bprint_event = {
	.type		= TRACE_BPRINT,
	.funcs		= &trace_bprint_funcs,
};

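/*
 * Background note: TRACE_BPRINT entries come from trace_printk() calls
 * whose format string is known at build time; only the format pointer
 * and the binary arguments are recorded, and trace_seq_bprintf() does
 * the formatting at read time. The TRACE_PRINT entries below carry an
 * already formatted string instead.
 */
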
/* TRACE_PRINT */
static enum print_line_t trace_print_print(struct trace_iterator *iter,
					   int flags, struct trace_event *event)
{
	struct print_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	seq_print_ip_sym(s, field->ip, flags);
	trace_seq_printf(s, ": %s", field->buf);

	return trace_handle_return(s);
}

static enum print_line_t trace_print_raw(struct trace_iterator *iter, int flags,
					 struct trace_event *event)
{
	struct print_entry *field;

	trace_assign_type(field, iter->ent);

	trace_seq_printf(&iter->seq, "# %lx %s", field->ip, field->buf);

	return trace_handle_return(&iter->seq);
}

static struct trace_event_functions trace_print_funcs = {
	.trace		= trace_print_print,
	.raw		= trace_print_raw,
};

static struct trace_event trace_print_event = {
	.type		= TRACE_PRINT,
	.funcs		= &trace_print_funcs,
};

static enum print_line_t trace_raw_data(struct trace_iterator *iter, int flags,
					struct trace_event *event)
{
	struct raw_data_entry *field;
	int i;

	trace_assign_type(field, iter->ent);

	trace_seq_printf(&iter->seq, "# %x buf:", field->id);

	for (i = 0; i < iter->ent_size - offsetof(struct raw_data_entry, buf); i++)
		trace_seq_printf(&iter->seq, " %02x",
				 (unsigned char)field->buf[i]);

	trace_seq_putc(&iter->seq, '\n');

	return trace_handle_return(&iter->seq);
}

static struct trace_event_functions trace_raw_data_funcs = {
	.trace		= trace_raw_data,
	.raw		= trace_raw_data,
};

static struct trace_event trace_raw_data_event = {
	.type		= TRACE_RAW_DATA,
	.funcs		= &trace_raw_data_funcs,
};

static struct trace_event *events[] __initdata = {
	&trace_fn_event,
	&trace_ctx_event,
	&trace_wake_event,
	&trace_stack_event,
	&trace_user_stack_event,
	&trace_bputs_event,
	&trace_bprint_event,
	&trace_print_event,
	&trace_hwlat_event,
	&trace_raw_data_event,
	NULL
};

__init static int init_events(void)
{
	struct trace_event *event;
	int i, ret;

	for (i = 0; events[i]; i++) {
		event = events[i];

		ret = register_trace_event(event);
		if (!ret) {
			printk(KERN_WARNING "event %d failed to register\n",
			       event->type);
			WARN_ON_ONCE(1);
		}
	}

	return 0;
}
early_initcall(init_events);