// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <inttypes.h>
#include "builtin.h"
#include "perf.h"

#include "util/evlist.h" // for struct evsel_str_handler
#include "util/evsel.h"
#include "util/symbol.h"
#include "util/thread.h"
#include "util/header.h"
#include "util/target.h"
#include "util/callchain.h"
#include "util/lock-contention.h"

#include <subcmd/pager.h>
#include <subcmd/parse-options.h>
#include "util/trace-event.h"
#include "util/tracepoint.h"

#include "util/debug.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/data.h"
#include "util/string2.h"
#include "util/map.h"
#include "util/util.h"

#include <sys/types.h>
#include <sys/prctl.h>
#include <semaphore.h>
#include <math.h>
#include <limits.h>

#include <linux/list.h>
#include <linux/hash.h>
#include <linux/kernel.h>
#include <linux/zalloc.h>
#include <linux/err.h>
#include <linux/stringify.h>

static struct perf_session *session;
static struct target target;

/* based on kernel/lockdep.c */
#define LOCKHASH_BITS		12
#define LOCKHASH_SIZE		(1UL << LOCKHASH_BITS)

static struct hlist_head lockhash_table[LOCKHASH_SIZE];

#define __lockhashfn(key)	hash_long((unsigned long)key, LOCKHASH_BITS)
#define lockhashentry(key)	(lockhash_table + __lockhashfn((key)))

static struct rb_root thread_stats;

static bool combine_locks;
static bool show_thread_stats;
static bool use_bpf;
static unsigned long bpf_map_entries = 10240;
static int max_stack_depth = CONTENTION_STACK_DEPTH;
static int stack_skip = CONTENTION_STACK_SKIP;
static int print_nr_entries = INT_MAX / 2;

static enum {
	LOCK_AGGR_ADDR,
	LOCK_AGGR_TASK,
	LOCK_AGGR_CALLER,
} aggr_mode = LOCK_AGGR_ADDR;

static u64 sched_text_start;
static u64 sched_text_end;
static u64 lock_text_start;
static u64 lock_text_end;

static struct thread_stat *thread_stat_find(u32 tid)
{
	struct rb_node *node;
	struct thread_stat *st;

	node = thread_stats.rb_node;
	while (node) {
		st = container_of(node, struct thread_stat, rb);
		if (st->tid == tid)
			return st;
		else if (tid < st->tid)
			node = node->rb_left;
		else
			node = node->rb_right;
	}

	return NULL;
}

static void thread_stat_insert(struct thread_stat *new)
{
	struct rb_node **rb = &thread_stats.rb_node;
	struct rb_node *parent = NULL;
	struct thread_stat *p;

	while (*rb) {
		p = container_of(*rb, struct thread_stat, rb);
		parent = *rb;

		if (new->tid < p->tid)
			rb = &(*rb)->rb_left;
		else if (new->tid > p->tid)
			rb = &(*rb)->rb_right;
		else
			BUG_ON("inserting invalid thread_stat\n");
	}

	rb_link_node(&new->rb, parent, rb);
	rb_insert_color(&new->rb, &thread_stats);
}

static struct thread_stat *thread_stat_findnew_after_first(u32 tid)
{
	struct thread_stat *st;

	st = thread_stat_find(tid);
	if (st)
		return st;

	st = zalloc(sizeof(struct thread_stat));
	if (!st) {
		pr_err("memory allocation failed\n");
		return NULL;
	}

	st->tid = tid;
	INIT_LIST_HEAD(&st->seq_list);

	thread_stat_insert(st);

	return st;
}

static struct thread_stat *thread_stat_findnew_first(u32 tid);
static struct thread_stat *(*thread_stat_findnew)(u32 tid) =
	thread_stat_findnew_first;
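/*
 * thread_stat_findnew is a function pointer so that the very first
 * lookup can take a fast path: thread_stat_findnew_first() links the
 * initial node straight at the tree root, then switches the pointer to
 * thread_stat_findnew_after_first(), which does a normal rb-tree search
 * before allocating a new node.
 */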
static struct thread_stat *thread_stat_findnew_first(u32 tid)
{
	struct thread_stat *st;

	st = zalloc(sizeof(struct thread_stat));
	if (!st) {
		pr_err("memory allocation failed\n");
		return NULL;
	}
	st->tid = tid;
	INIT_LIST_HEAD(&st->seq_list);

	rb_link_node(&st->rb, NULL, &thread_stats.rb_node);
	rb_insert_color(&st->rb, &thread_stats);

	thread_stat_findnew = thread_stat_findnew_after_first;
	return st;
}

/* build a simple key comparison function: returns true if 'one' is bigger than 'two' */
#define SINGLE_KEY(member)						\
	static int lock_stat_key_ ## member(struct lock_stat *one,	\
					    struct lock_stat *two)	\
	{								\
		return one->member > two->member;			\
	}

SINGLE_KEY(nr_acquired)
SINGLE_KEY(nr_contended)
SINGLE_KEY(avg_wait_time)
SINGLE_KEY(wait_time_total)
SINGLE_KEY(wait_time_max)

static int lock_stat_key_wait_time_min(struct lock_stat *one,
					struct lock_stat *two)
{
	u64 s1 = one->wait_time_min;
	u64 s2 = two->wait_time_min;
	if (s1 == ULLONG_MAX)
		s1 = 0;
	if (s2 == ULLONG_MAX)
		s2 = 0;
	return s1 > s2;
}

struct lock_key {
	/*
	 * name: the value specified by the user;
	 * it should be simpler than the raw name of the member,
	 * e.g. nr_acquired -> acquired, wait_time_total -> wait_total
	 */
	const char *name;
	/* header: the string printed on the header line */
	const char *header;
	/* len: the printing width of the field */
	int len;
	/* key: a pointer to a function to compare two lock stats for sorting */
	int (*key)(struct lock_stat*, struct lock_stat*);
	/* print: a pointer to a function to print a given lock stat */
	void (*print)(struct lock_key*, struct lock_stat*);
	/* list: list entry to link this */
	struct list_head list;
};

static void lock_stat_key_print_time(unsigned long long nsec, int len)
{
	static const struct {
		float base;
		const char *unit;
	} table[] = {
		{ 1e9 * 3600, "h " },
		{ 1e9 * 60, "m " },
		{ 1e9, "s " },
		{ 1e6, "ms" },
		{ 1e3, "us" },
		{ 0, NULL },
	};

	for (int i = 0; table[i].unit; i++) {
		if (nsec < table[i].base)
			continue;

		pr_info("%*.2f %s", len - 3, nsec / table[i].base, table[i].unit);
		return;
	}

	pr_info("%*llu %s", len - 3, nsec, "ns");
}

#define PRINT_KEY(member)						\
static void lock_stat_key_print_ ## member(struct lock_key *key,	\
					   struct lock_stat *ls)	\
{									\
	pr_info("%*llu", key->len, (unsigned long long)ls->member);	\
}

#define PRINT_TIME(member)						\
static void lock_stat_key_print_ ## member(struct lock_key *key,	\
					   struct lock_stat *ls)	\
{									\
	lock_stat_key_print_time((unsigned long long)ls->member, key->len); \
}

PRINT_KEY(nr_acquired)
PRINT_KEY(nr_contended)
PRINT_TIME(avg_wait_time)
PRINT_TIME(wait_time_total)
PRINT_TIME(wait_time_max)

static void lock_stat_key_print_wait_time_min(struct lock_key *key,
					      struct lock_stat *ls)
{
	u64 wait_time = ls->wait_time_min;

	if (wait_time == ULLONG_MAX)
		wait_time = 0;

	lock_stat_key_print_time(wait_time, key->len);
}


static const char *sort_key = "acquired";

static int (*compare)(struct lock_stat *, struct lock_stat *);

static struct rb_root sorted; /* place to store intermediate data */
static struct rb_root result; /* place to store sorted data */

static LIST_HEAD(lock_keys);
static const char *output_fields;
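/*
 * DEF_KEY_LOCK builds one lock_key entry per sortable/printable field.
 * The -k/--key option picks the comparison function from these tables
 * and -F/--field selects which columns get linked onto lock_keys;
 * e.g. "perf lock report -k wait_max -F contended,wait_total".
 */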
#define DEF_KEY_LOCK(name, header, fn_suffix, len)			\
	{ #name, header, len, lock_stat_key_ ## fn_suffix, lock_stat_key_print_ ## fn_suffix, {} }
static struct lock_key report_keys[] = {
	DEF_KEY_LOCK(acquired, "acquired", nr_acquired, 10),
	DEF_KEY_LOCK(contended, "contended", nr_contended, 10),
	DEF_KEY_LOCK(avg_wait, "avg wait", avg_wait_time, 12),
	DEF_KEY_LOCK(wait_total, "total wait", wait_time_total, 12),
	DEF_KEY_LOCK(wait_max, "max wait", wait_time_max, 12),
	DEF_KEY_LOCK(wait_min, "min wait", wait_time_min, 12),

	/* more complicated comparisons should go here */
	{ }
};

static struct lock_key contention_keys[] = {
	DEF_KEY_LOCK(contended, "contended", nr_contended, 10),
	DEF_KEY_LOCK(wait_total, "total wait", wait_time_total, 12),
	DEF_KEY_LOCK(wait_max, "max wait", wait_time_max, 12),
	DEF_KEY_LOCK(wait_min, "min wait", wait_time_min, 12),
	DEF_KEY_LOCK(avg_wait, "avg wait", avg_wait_time, 12),

	/* more complicated comparisons should go here */
	{ }
};

static int select_key(bool contention)
{
	int i;
	struct lock_key *keys = report_keys;

	if (contention)
		keys = contention_keys;

	for (i = 0; keys[i].name; i++) {
		if (!strcmp(keys[i].name, sort_key)) {
			compare = keys[i].key;

			/* selected key should be in the output fields */
			if (list_empty(&keys[i].list))
				list_add_tail(&keys[i].list, &lock_keys);

			return 0;
		}
	}

	pr_err("Unknown compare key: %s\n", sort_key);
	return -1;
}

static int add_output_field(bool contention, char *name)
{
	int i;
	struct lock_key *keys = report_keys;

	if (contention)
		keys = contention_keys;

	for (i = 0; keys[i].name; i++) {
		if (strcmp(keys[i].name, name))
			continue;

		/* prevent double link */
		if (list_empty(&keys[i].list))
			list_add_tail(&keys[i].list, &lock_keys);

		return 0;
	}

	pr_err("Unknown output field: %s\n", name);
	return -1;
}

static int setup_output_field(bool contention, const char *str)
{
	char *tok, *tmp, *orig;
	int i, ret = 0;
	struct lock_key *keys = report_keys;

	if (contention)
		keys = contention_keys;

	/* no output field given: use all of them */
	if (str == NULL) {
		for (i = 0; keys[i].name; i++)
			list_add_tail(&keys[i].list, &lock_keys);
		return 0;
	}

	for (i = 0; keys[i].name; i++)
		INIT_LIST_HEAD(&keys[i].list);

	orig = tmp = strdup(str);
	if (orig == NULL)
		return -ENOMEM;

	while ((tok = strsep(&tmp, ",")) != NULL) {
		ret = add_output_field(contention, tok);
		if (ret < 0)
			break;
	}
	free(orig);

	return ret;
}
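/*
 * With -c/--combine-locks, fold the stats of every lock instance that
 * shares a class name into a single entry in the 'sorted' tree.
 * Combined instances are marked with st->combined so that
 * insert_to_result() skips them later.
 */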
static void combine_lock_stats(struct lock_stat *st)
{
	struct rb_node **rb = &sorted.rb_node;
	struct rb_node *parent = NULL;
	struct lock_stat *p;
	int ret;

	while (*rb) {
		p = container_of(*rb, struct lock_stat, rb);
		parent = *rb;

		if (st->name && p->name)
			ret = strcmp(st->name, p->name);
		else
			ret = !!st->name - !!p->name;

		if (ret == 0) {
			p->nr_acquired += st->nr_acquired;
			p->nr_contended += st->nr_contended;
			p->wait_time_total += st->wait_time_total;

			if (p->nr_contended)
				p->avg_wait_time = p->wait_time_total / p->nr_contended;

			if (p->wait_time_min > st->wait_time_min)
				p->wait_time_min = st->wait_time_min;
			if (p->wait_time_max < st->wait_time_max)
				p->wait_time_max = st->wait_time_max;

			p->broken |= st->broken;
			st->combined = 1;
			return;
		}

		if (ret < 0)
			rb = &(*rb)->rb_left;
		else
			rb = &(*rb)->rb_right;
	}

	rb_link_node(&st->rb, parent, rb);
	rb_insert_color(&st->rb, &sorted);
}

static void insert_to_result(struct lock_stat *st,
			     int (*bigger)(struct lock_stat *, struct lock_stat *))
{
	struct rb_node **rb = &result.rb_node;
	struct rb_node *parent = NULL;
	struct lock_stat *p;

	if (combine_locks && st->combined)
		return;

	while (*rb) {
		p = container_of(*rb, struct lock_stat, rb);
		parent = *rb;

		if (bigger(st, p))
			rb = &(*rb)->rb_left;
		else
			rb = &(*rb)->rb_right;
	}

	rb_link_node(&st->rb, parent, rb);
	rb_insert_color(&st->rb, &result);
}

/* return the leftmost element of result, and erase it */
static struct lock_stat *pop_from_result(void)
{
	struct rb_node *node = result.rb_node;

	if (!node)
		return NULL;

	while (node->rb_left)
		node = node->rb_left;

	rb_erase(node, &result);
	return container_of(node, struct lock_stat, rb);
}

static struct lock_stat *lock_stat_find(u64 addr)
{
	struct hlist_head *entry = lockhashentry(addr);
	struct lock_stat *ret;

	hlist_for_each_entry(ret, entry, hash_entry) {
		if (ret->addr == addr)
			return ret;
	}
	return NULL;
}

static struct lock_stat *lock_stat_findnew(u64 addr, const char *name, int flags)
{
	struct hlist_head *entry = lockhashentry(addr);
	struct lock_stat *ret, *new;

	hlist_for_each_entry(ret, entry, hash_entry) {
		if (ret->addr == addr)
			return ret;
	}

	new = zalloc(sizeof(struct lock_stat));
	if (!new)
		goto alloc_failed;

	new->addr = addr;
	new->name = strdup(name);
	if (!new->name) {
		free(new);
		goto alloc_failed;
	}

	new->flags = flags;
	new->wait_time_min = ULLONG_MAX;

	hlist_add_head(&new->hash_entry, entry);
	return new;

alloc_failed:
	pr_err("memory allocation failed\n");
	return NULL;
}

struct trace_lock_handler {
	/* it's used with CONFIG_LOCKDEP */
	int (*acquire_event)(struct evsel *evsel,
			     struct perf_sample *sample);

	/* it's used with CONFIG_LOCKDEP && CONFIG_LOCK_STAT */
	int (*acquired_event)(struct evsel *evsel,
			      struct perf_sample *sample);

	/* it's used with CONFIG_LOCKDEP && CONFIG_LOCK_STAT */
	int (*contended_event)(struct evsel *evsel,
			       struct perf_sample *sample);

	/* it's used with CONFIG_LOCKDEP */
	int (*release_event)(struct evsel *evsel,
			     struct perf_sample *sample);

	/* it's used when CONFIG_LOCKDEP is off */
	int (*contention_begin_event)(struct evsel *evsel,
				      struct perf_sample *sample);

	/* it's used when CONFIG_LOCKDEP is off */
	int (*contention_end_event)(struct evsel *evsel,
				    struct perf_sample *sample);
};
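/*
 * Each thread keeps one lock_seq_stat per lock address it touches,
 * recording where that lock is in the event sequence, roughly:
 *
 *   UNINITIALIZED -> ACQUIRING -> (CONTENDED ->) ACQUIRED -> RELEASED
 *
 * (read locks go through READ_ACQUIRED with a nesting count instead).
 * Events arriving out of this order mark the lock as broken and are
 * counted in bad_hist[].
 */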
static struct lock_seq_stat *get_seq(struct thread_stat *ts, u64 addr)
{
	struct lock_seq_stat *seq;

	list_for_each_entry(seq, &ts->seq_list, list) {
		if (seq->addr == addr)
			return seq;
	}

	seq = zalloc(sizeof(struct lock_seq_stat));
	if (!seq) {
		pr_err("memory allocation failed\n");
		return NULL;
	}
	seq->state = SEQ_STATE_UNINITIALIZED;
	seq->addr = addr;

	list_add(&seq->list, &ts->seq_list);
	return seq;
}

enum broken_state {
	BROKEN_ACQUIRE,
	BROKEN_ACQUIRED,
	BROKEN_CONTENDED,
	BROKEN_RELEASE,
	BROKEN_MAX,
};

static int bad_hist[BROKEN_MAX];

enum acquire_flags {
	TRY_LOCK = 1,
	READ_LOCK = 2,
};

static int get_key_by_aggr_mode_simple(u64 *key, u64 addr, u32 tid)
{
	switch (aggr_mode) {
	case LOCK_AGGR_ADDR:
		*key = addr;
		break;
	case LOCK_AGGR_TASK:
		*key = tid;
		break;
	case LOCK_AGGR_CALLER:
	default:
		pr_err("Invalid aggregation mode: %d\n", aggr_mode);
		return -EINVAL;
	}
	return 0;
}

static u64 callchain_id(struct evsel *evsel, struct perf_sample *sample);

static int get_key_by_aggr_mode(u64 *key, u64 addr, struct evsel *evsel,
				struct perf_sample *sample)
{
	if (aggr_mode == LOCK_AGGR_CALLER) {
		*key = callchain_id(evsel, sample);
		return 0;
	}
	return get_key_by_aggr_mode_simple(key, addr, sample->tid);
}

static int report_lock_acquire_event(struct evsel *evsel,
				     struct perf_sample *sample)
{
	struct lock_stat *ls;
	struct thread_stat *ts;
	struct lock_seq_stat *seq;
	const char *name = evsel__strval(evsel, sample, "name");
	u64 addr = evsel__intval(evsel, sample, "lockdep_addr");
	int flag = evsel__intval(evsel, sample, "flags");
	u64 key;
	int ret;

	ret = get_key_by_aggr_mode_simple(&key, addr, sample->tid);
	if (ret < 0)
		return ret;

	ls = lock_stat_findnew(key, name, 0);
	if (!ls)
		return -ENOMEM;

	ts = thread_stat_findnew(sample->tid);
	if (!ts)
		return -ENOMEM;

	seq = get_seq(ts, addr);
	if (!seq)
		return -ENOMEM;

	switch (seq->state) {
	case SEQ_STATE_UNINITIALIZED:
	case SEQ_STATE_RELEASED:
		if (!flag) {
			seq->state = SEQ_STATE_ACQUIRING;
		} else {
			if (flag & TRY_LOCK)
				ls->nr_trylock++;
			if (flag & READ_LOCK)
				ls->nr_readlock++;
			seq->state = SEQ_STATE_READ_ACQUIRED;
			seq->read_count = 1;
			ls->nr_acquired++;
		}
		break;
	case SEQ_STATE_READ_ACQUIRED:
		if (flag & READ_LOCK) {
			seq->read_count++;
			ls->nr_acquired++;
			goto end;
		} else {
			goto broken;
		}
		break;
	case SEQ_STATE_ACQUIRED:
	case SEQ_STATE_ACQUIRING:
	case SEQ_STATE_CONTENDED:
broken:
		/* broken lock sequence */
		if (!ls->broken) {
			ls->broken = 1;
			bad_hist[BROKEN_ACQUIRE]++;
		}
		list_del_init(&seq->list);
		free(seq);
		goto end;
	default:
		BUG_ON("Unknown state of lock sequence found!\n");
		break;
	}

	ls->nr_acquire++;
	seq->prev_event_time = sample->time;
end:
	return 0;
}
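/*
 * An 'acquired' event that follows a 'contended' event closes a wait
 * period: the delta between the two sample timestamps is charged to
 * the lock's wait_time_total/min/max and the average is recomputed.
 */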
static int report_lock_acquired_event(struct evsel *evsel,
				      struct perf_sample *sample)
{
	struct lock_stat *ls;
	struct thread_stat *ts;
	struct lock_seq_stat *seq;
	u64 contended_term;
	const char *name = evsel__strval(evsel, sample, "name");
	u64 addr = evsel__intval(evsel, sample, "lockdep_addr");
	u64 key;
	int ret;

	ret = get_key_by_aggr_mode_simple(&key, addr, sample->tid);
	if (ret < 0)
		return ret;

	ls = lock_stat_findnew(key, name, 0);
	if (!ls)
		return -ENOMEM;

	ts = thread_stat_findnew(sample->tid);
	if (!ts)
		return -ENOMEM;

	seq = get_seq(ts, addr);
	if (!seq)
		return -ENOMEM;

	switch (seq->state) {
	case SEQ_STATE_UNINITIALIZED:
		/* orphan event, do nothing */
		return 0;
	case SEQ_STATE_ACQUIRING:
		break;
	case SEQ_STATE_CONTENDED:
		contended_term = sample->time - seq->prev_event_time;
		ls->wait_time_total += contended_term;
		if (contended_term < ls->wait_time_min)
			ls->wait_time_min = contended_term;
		if (ls->wait_time_max < contended_term)
			ls->wait_time_max = contended_term;
		break;
	case SEQ_STATE_RELEASED:
	case SEQ_STATE_ACQUIRED:
	case SEQ_STATE_READ_ACQUIRED:
		/* broken lock sequence */
		if (!ls->broken) {
			ls->broken = 1;
			bad_hist[BROKEN_ACQUIRED]++;
		}
		list_del_init(&seq->list);
		free(seq);
		goto end;
	default:
		BUG_ON("Unknown state of lock sequence found!\n");
		break;
	}

	seq->state = SEQ_STATE_ACQUIRED;
	ls->nr_acquired++;
	ls->avg_wait_time = ls->nr_contended ? ls->wait_time_total/ls->nr_contended : 0;
	seq->prev_event_time = sample->time;
end:
	return 0;
}

static int report_lock_contended_event(struct evsel *evsel,
				       struct perf_sample *sample)
{
	struct lock_stat *ls;
	struct thread_stat *ts;
	struct lock_seq_stat *seq;
	const char *name = evsel__strval(evsel, sample, "name");
	u64 addr = evsel__intval(evsel, sample, "lockdep_addr");
	u64 key;
	int ret;

	ret = get_key_by_aggr_mode_simple(&key, addr, sample->tid);
	if (ret < 0)
		return ret;

	ls = lock_stat_findnew(key, name, 0);
	if (!ls)
		return -ENOMEM;

	ts = thread_stat_findnew(sample->tid);
	if (!ts)
		return -ENOMEM;

	seq = get_seq(ts, addr);
	if (!seq)
		return -ENOMEM;

	switch (seq->state) {
	case SEQ_STATE_UNINITIALIZED:
		/* orphan event, do nothing */
		return 0;
	case SEQ_STATE_ACQUIRING:
		break;
	case SEQ_STATE_RELEASED:
	case SEQ_STATE_ACQUIRED:
	case SEQ_STATE_READ_ACQUIRED:
	case SEQ_STATE_CONTENDED:
		/* broken lock sequence */
		if (!ls->broken) {
			ls->broken = 1;
			bad_hist[BROKEN_CONTENDED]++;
		}
		list_del_init(&seq->list);
		free(seq);
		goto end;
	default:
		BUG_ON("Unknown state of lock sequence found!\n");
		break;
	}

	seq->state = SEQ_STATE_CONTENDED;
	ls->nr_contended++;
	ls->avg_wait_time = ls->wait_time_total/ls->nr_contended;
	seq->prev_event_time = sample->time;
end:
	return 0;
}

static int report_lock_release_event(struct evsel *evsel,
				     struct perf_sample *sample)
{
	struct lock_stat *ls;
	struct thread_stat *ts;
	struct lock_seq_stat *seq;
	const char *name = evsel__strval(evsel, sample, "name");
	u64 addr = evsel__intval(evsel, sample, "lockdep_addr");
	u64 key;
	int ret;

	ret = get_key_by_aggr_mode_simple(&key, addr, sample->tid);
	if (ret < 0)
		return ret;

	ls = lock_stat_findnew(key, name, 0);
	if (!ls)
		return -ENOMEM;

	ts = thread_stat_findnew(sample->tid);
	if (!ts)
		return -ENOMEM;

	seq = get_seq(ts, addr);
	if (!seq)
		return -ENOMEM;

	switch (seq->state) {
	case SEQ_STATE_UNINITIALIZED:
		goto end;
	case SEQ_STATE_ACQUIRED:
		break;
	case SEQ_STATE_READ_ACQUIRED:
		seq->read_count--;
		BUG_ON(seq->read_count < 0);
		if (seq->read_count) {
			ls->nr_release++;
			goto end;
		}
		break;
	case SEQ_STATE_ACQUIRING:
	case SEQ_STATE_CONTENDED:
	case SEQ_STATE_RELEASED:
		/* broken lock sequence */
		if (!ls->broken) {
			ls->broken = 1;
			bad_hist[BROKEN_RELEASE]++;
		}
		goto free_seq;
	default:
		BUG_ON("Unknown state of lock sequence found!\n");
		break;
	}

	ls->nr_release++;
free_seq:
	list_del_init(&seq->list);
	free(seq);
end:
	return 0;
}
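/*
 * Lock implementation functions live in well-known kernel text
 * sections: mutexes and rwsems in the scheduler text section,
 * spinlocks in the lock text section.  Resolve and cache the section
 * boundaries once; sched_text_start == 1 records a failed lookup so
 * the symbol search is not retried on every sample.
 */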
bool is_lock_function(struct machine *machine, u64 addr)
{
	if (!sched_text_start) {
		struct map *kmap;
		struct symbol *sym;

		sym = machine__find_kernel_symbol_by_name(machine,
							  "__sched_text_start",
							  &kmap);
		if (!sym) {
			/* to avoid retry */
			sched_text_start = 1;
			return false;
		}

		sched_text_start = kmap->unmap_ip(kmap, sym->start);

		/* should not fail from here */
		sym = machine__find_kernel_symbol_by_name(machine,
							  "__sched_text_end",
							  &kmap);
		sched_text_end = kmap->unmap_ip(kmap, sym->start);

		sym = machine__find_kernel_symbol_by_name(machine,
							  "__lock_text_start",
							  &kmap);
		lock_text_start = kmap->unmap_ip(kmap, sym->start);

		sym = machine__find_kernel_symbol_by_name(machine,
							  "__lock_text_end",
							  &kmap);
		lock_text_end = kmap->unmap_ip(kmap, sym->start);
	}

	/* failed to get kernel symbols */
	if (sched_text_start == 1)
		return false;

	/* mutex and rwsem functions are in sched text section */
	if (sched_text_start <= addr && addr < sched_text_end)
		return true;

	/* spinlock functions are in lock text section */
	if (lock_text_start <= addr && addr < lock_text_end)
		return true;

	return false;
}

static int get_symbol_name_offset(struct map *map, struct symbol *sym, u64 ip,
				  char *buf, int size)
{
	u64 offset;

	if (map == NULL || sym == NULL) {
		buf[0] = '\0';
		return 0;
	}

	offset = map->map_ip(map, ip) - sym->start;

	if (offset)
		return scnprintf(buf, size, "%s+%#lx", sym->name, offset);
	else
		return strlcpy(buf, sym->name, size);
}

static int lock_contention_caller(struct evsel *evsel, struct perf_sample *sample,
				  char *buf, int size)
{
	struct thread *thread;
	struct callchain_cursor *cursor = &callchain_cursor;
	struct machine *machine = &session->machines.host;
	struct symbol *sym;
	int skip = 0;
	int ret;

	/* lock names will be replaced with task names later */
	if (show_thread_stats)
		return -1;

	thread = machine__findnew_thread(machine, -1, sample->pid);
	if (thread == NULL)
		return -1;

	/* use caller function name from the callchain */
	ret = thread__resolve_callchain(thread, cursor, evsel, sample,
					NULL, NULL, max_stack_depth);
	if (ret != 0) {
		thread__put(thread);
		return -1;
	}

	callchain_cursor_commit(cursor);
	thread__put(thread);

	while (true) {
		struct callchain_cursor_node *node;

		node = callchain_cursor_current(cursor);
		if (node == NULL)
			break;

		/* skip first few entries - for lock functions */
		if (++skip <= stack_skip)
			goto next;

		sym = node->ms.sym;
		if (sym && !is_lock_function(machine, node->ip)) {
			get_symbol_name_offset(node->ms.map, sym, node->ip,
					       buf, size);
			return 0;
		}

next:
		callchain_cursor_advance(cursor);
	}
	return -1;
}
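/*
 * In LOCK_AGGR_CALLER mode the aggregation key is a hash of the
 * callchain: skip the lock-internal frames at the top, then XOR in the
 * hash of each remaining IP.  Two contention events with the same
 * (post-skip) call path thus land in the same lock_stat entry.
 */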
static u64 callchain_id(struct evsel *evsel, struct perf_sample *sample)
{
	struct callchain_cursor *cursor = &callchain_cursor;
	struct machine *machine = &session->machines.host;
	struct thread *thread;
	u64 hash = 0;
	int skip = 0;
	int ret;

	thread = machine__findnew_thread(machine, -1, sample->pid);
	if (thread == NULL)
		return -1;

	/* use caller function name from the callchain */
	ret = thread__resolve_callchain(thread, cursor, evsel, sample,
					NULL, NULL, max_stack_depth);
	thread__put(thread);

	if (ret != 0)
		return -1;

	callchain_cursor_commit(cursor);

	while (true) {
		struct callchain_cursor_node *node;

		node = callchain_cursor_current(cursor);
		if (node == NULL)
			break;

		/* skip first few entries - for lock functions */
		if (++skip <= stack_skip)
			goto next;

		if (node->ms.sym && is_lock_function(machine, node->ip))
			goto next;

		hash ^= hash_long((unsigned long)node->ip, 64);

next:
		callchain_cursor_advance(cursor);
	}
	return hash;
}

static u64 *get_callstack(struct perf_sample *sample, int max_stack)
{
	u64 *callstack;
	u64 i;
	int c;

	callstack = calloc(max_stack, sizeof(*callstack));
	if (callstack == NULL)
		return NULL;

	for (i = 0, c = 0; i < sample->callchain->nr && c < max_stack; i++) {
		u64 ip = sample->callchain->ips[i];

		if (ip >= PERF_CONTEXT_MAX)
			continue;

		callstack[c++] = ip;
	}
	return callstack;
}
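/*
 * Handlers for the lock:contention_begin/end tracepoints.  Unlike the
 * lockdep tracepoints these carry only the lock address and flags, so
 * the display name must be recovered from the callchain (or from the
 * task in per-thread mode) rather than from the tracepoint payload.
 */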
static int report_lock_contention_begin_event(struct evsel *evsel,
					      struct perf_sample *sample)
{
	struct lock_stat *ls;
	struct thread_stat *ts;
	struct lock_seq_stat *seq;
	u64 addr = evsel__intval(evsel, sample, "lock_addr");
	u64 key;
	int ret;

	ret = get_key_by_aggr_mode(&key, addr, evsel, sample);
	if (ret < 0)
		return ret;

	ls = lock_stat_find(key);
	if (!ls) {
		char buf[128];
		const char *caller = buf;
		unsigned int flags = evsel__intval(evsel, sample, "flags");

		if (lock_contention_caller(evsel, sample, buf, sizeof(buf)) < 0)
			caller = "Unknown";

		ls = lock_stat_findnew(key, caller, flags);
		if (!ls)
			return -ENOMEM;

		if (aggr_mode == LOCK_AGGR_CALLER && verbose) {
			ls->callstack = get_callstack(sample, max_stack_depth);
			if (ls->callstack == NULL)
				return -ENOMEM;
		}
	}

	ts = thread_stat_findnew(sample->tid);
	if (!ts)
		return -ENOMEM;

	seq = get_seq(ts, addr);
	if (!seq)
		return -ENOMEM;

	switch (seq->state) {
	case SEQ_STATE_UNINITIALIZED:
	case SEQ_STATE_ACQUIRED:
		break;
	case SEQ_STATE_CONTENDED:
		/*
		 * Mutex spinning can produce a nested contention_begin;
		 * keep using the original contention_begin event and
		 * ignore the second one.
		 */
		goto end;
	case SEQ_STATE_ACQUIRING:
	case SEQ_STATE_READ_ACQUIRED:
	case SEQ_STATE_RELEASED:
		/* broken lock sequence */
		if (!ls->broken) {
			ls->broken = 1;
			bad_hist[BROKEN_CONTENDED]++;
		}
		list_del_init(&seq->list);
		free(seq);
		goto end;
	default:
		BUG_ON("Unknown state of lock sequence found!\n");
		break;
	}

	if (seq->state != SEQ_STATE_CONTENDED) {
		seq->state = SEQ_STATE_CONTENDED;
		seq->prev_event_time = sample->time;
		ls->nr_contended++;
	}
end:
	return 0;
}

static int report_lock_contention_end_event(struct evsel *evsel,
					    struct perf_sample *sample)
{
	struct lock_stat *ls;
	struct thread_stat *ts;
	struct lock_seq_stat *seq;
	u64 contended_term;
	u64 addr = evsel__intval(evsel, sample, "lock_addr");
	u64 key;
	int ret;

	ret = get_key_by_aggr_mode(&key, addr, evsel, sample);
	if (ret < 0)
		return ret;

	ls = lock_stat_find(key);
	if (!ls)
		return 0;

	ts = thread_stat_find(sample->tid);
	if (!ts)
		return 0;

	seq = get_seq(ts, addr);
	if (!seq)
		return -ENOMEM;

	switch (seq->state) {
	case SEQ_STATE_UNINITIALIZED:
		goto end;
	case SEQ_STATE_CONTENDED:
		contended_term = sample->time - seq->prev_event_time;
		ls->wait_time_total += contended_term;
		if (contended_term < ls->wait_time_min)
			ls->wait_time_min = contended_term;
		if (ls->wait_time_max < contended_term)
			ls->wait_time_max = contended_term;
		break;
	case SEQ_STATE_ACQUIRING:
	case SEQ_STATE_ACQUIRED:
	case SEQ_STATE_READ_ACQUIRED:
	case SEQ_STATE_RELEASED:
		/* broken lock sequence */
		if (!ls->broken) {
			ls->broken = 1;
			bad_hist[BROKEN_ACQUIRED]++;
		}
		list_del_init(&seq->list);
		free(seq);
		goto end;
	default:
		BUG_ON("Unknown state of lock sequence found!\n");
		break;
	}

	seq->state = SEQ_STATE_ACQUIRED;
	ls->nr_acquired++;
	ls->avg_wait_time = ls->wait_time_total/ls->nr_acquired;
end:
	return 0;
}

/* lock oriented handlers */
/* TODO: handlers for CPU oriented, thread oriented */
static struct trace_lock_handler report_lock_ops = {
	.acquire_event		= report_lock_acquire_event,
	.acquired_event		= report_lock_acquired_event,
	.contended_event	= report_lock_contended_event,
	.release_event		= report_lock_release_event,
	.contention_begin_event	= report_lock_contention_begin_event,
	.contention_end_event	= report_lock_contention_end_event,
};

static struct trace_lock_handler contention_lock_ops = {
	.contention_begin_event	= report_lock_contention_begin_event,
	.contention_end_event	= report_lock_contention_end_event,
};

static struct trace_lock_handler *trace_handler;
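/*
 * Thin wrappers installed as evsel handlers: process_sample_event()
 * looks up evsel->handler, and each wrapper forwards to the matching
 * callback of the currently selected trace_handler, if it has one.
 */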
static int evsel__process_lock_acquire(struct evsel *evsel, struct perf_sample *sample)
{
	if (trace_handler->acquire_event)
		return trace_handler->acquire_event(evsel, sample);
	return 0;
}

static int evsel__process_lock_acquired(struct evsel *evsel, struct perf_sample *sample)
{
	if (trace_handler->acquired_event)
		return trace_handler->acquired_event(evsel, sample);
	return 0;
}

static int evsel__process_lock_contended(struct evsel *evsel, struct perf_sample *sample)
{
	if (trace_handler->contended_event)
		return trace_handler->contended_event(evsel, sample);
	return 0;
}

static int evsel__process_lock_release(struct evsel *evsel, struct perf_sample *sample)
{
	if (trace_handler->release_event)
		return trace_handler->release_event(evsel, sample);
	return 0;
}

static int evsel__process_contention_begin(struct evsel *evsel, struct perf_sample *sample)
{
	if (trace_handler->contention_begin_event)
		return trace_handler->contention_begin_event(evsel, sample);
	return 0;
}

static int evsel__process_contention_end(struct evsel *evsel, struct perf_sample *sample)
{
	if (trace_handler->contention_end_event)
		return trace_handler->contention_end_event(evsel, sample);
	return 0;
}

static void print_bad_events(int bad, int total)
{
	/* Output for debug, this has to be removed */
	int i;
	int broken = 0;
	const char *name[BROKEN_MAX] =
		{ "acquire", "acquired", "contended", "release" };

	for (i = 0; i < BROKEN_MAX; i++)
		broken += bad_hist[i];

	if (quiet || (broken == 0 && !verbose))
		return;

	pr_info("\n=== output for debug===\n\n");
	pr_info("bad: %d, total: %d\n", bad, total);
	if (total)
		pr_info("bad rate: %.2f %%\n", (double)bad / (double)total * 100);
	pr_info("histogram of events caused bad sequence\n");
	for (i = 0; i < BROKEN_MAX; i++)
		pr_info(" %10s: %d\n", name[i], bad_hist[i]);
}
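/*
 * One row is printed per lock; the exact columns depend on -F/--field.
 * A hypothetical row might look roughly like:
 *
 *                 Name   acquired  contended   avg wait  total wait
 *       &mm->mmap_lock       1523         12    1.34 us    16.08 us
 *
 * Names longer than 20 characters are truncated with "...".
 */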
/* TODO: various ways to print, coloring, nano or milli sec */
static void print_result(void)
{
	struct lock_stat *st;
	struct lock_key *key;
	char cut_name[20];
	int bad, total, printed;

	if (!quiet) {
		pr_info("%20s ", "Name");
		list_for_each_entry(key, &lock_keys, list)
			pr_info("%*s ", key->len, key->header);
		pr_info("\n\n");
	}

	bad = total = printed = 0;
	while ((st = pop_from_result())) {
		total++;
		if (st->broken)
			bad++;
		if (!st->nr_acquired)
			continue;

		bzero(cut_name, 20);

		if (strlen(st->name) < 20) {
			/* output raw name */
			const char *name = st->name;

			if (show_thread_stats) {
				struct thread *t;

				/* st->addr contains tid of thread */
				t = perf_session__findnew(session, st->addr);
				name = thread__comm_str(t);
			}

			pr_info("%20s ", name);
		} else {
			strncpy(cut_name, st->name, 16);
			cut_name[16] = '.';
			cut_name[17] = '.';
			cut_name[18] = '.';
			cut_name[19] = '\0';
			/* cut off name for saving output style */
			pr_info("%20s ", cut_name);
		}

		list_for_each_entry(key, &lock_keys, list) {
			key->print(key, st);
			pr_info(" ");
		}
		pr_info("\n");

		if (++printed >= print_nr_entries)
			break;
	}

	print_bad_events(bad, total);
}

static bool info_threads, info_map;

static void dump_threads(void)
{
	struct thread_stat *st;
	struct rb_node *node;
	struct thread *t;

	pr_info("%10s: comm\n", "Thread ID");

	node = rb_first(&thread_stats);
	while (node) {
		st = container_of(node, struct thread_stat, rb);
		t = perf_session__findnew(session, st->tid);
		pr_info("%10d: %s\n", st->tid, thread__comm_str(t));
		node = rb_next(node);
		thread__put(t);
	}
}

static int compare_maps(struct lock_stat *a, struct lock_stat *b)
{
	int ret;

	if (a->name && b->name)
		ret = strcmp(a->name, b->name);
	else
		ret = !!a->name - !!b->name;

	if (!ret)
		return a->addr < b->addr;
	else
		return ret < 0;
}

static void dump_map(void)
{
	unsigned int i;
	struct lock_stat *st;

	pr_info("Address of instance: name of class\n");
	for (i = 0; i < LOCKHASH_SIZE; i++) {
		hlist_for_each_entry(st, &lockhash_table[i], hash_entry) {
			insert_to_result(st, compare_maps);
		}
	}

	while ((st = pop_from_result()))
		pr_info(" %#llx: %s\n", (unsigned long long)st->addr, st->name);
}

static int dump_info(void)
{
	int rc = 0;

	if (info_threads)
		dump_threads();
	else if (info_map)
		dump_map();
	else {
		rc = -1;
		pr_err("Unknown type of information\n");
	}

	return rc;
}

typedef int (*tracepoint_handler)(struct evsel *evsel,
				  struct perf_sample *sample);

static int process_sample_event(struct perf_tool *tool __maybe_unused,
				union perf_event *event,
				struct perf_sample *sample,
				struct evsel *evsel,
				struct machine *machine)
{
	int err = 0;
	struct thread *thread = machine__findnew_thread(machine, sample->pid,
							sample->tid);

	if (thread == NULL) {
		pr_debug("problem processing %d event, skipping it.\n",
			 event->header.type);
		return -1;
	}

	if (evsel->handler != NULL) {
		tracepoint_handler f = evsel->handler;
		err = f(evsel, sample);
	}

	thread__put(thread);

	return err;
}

static void combine_result(void)
{
	unsigned int i;
	struct lock_stat *st;

	if (!combine_locks)
		return;

	for (i = 0; i < LOCKHASH_SIZE; i++) {
		hlist_for_each_entry(st, &lockhash_table[i], hash_entry) {
			combine_lock_stats(st);
		}
	}
}

static void sort_result(void)
{
	unsigned int i;
	struct lock_stat *st;

	for (i = 0; i < LOCKHASH_SIZE; i++) {
		hlist_for_each_entry(st, &lockhash_table[i], hash_entry) {
			insert_to_result(st, compare);
		}
	}
}
static const char *get_type_str(struct lock_stat *st)
{
	static const struct {
		unsigned int flags;
		const char *name;
	} table[] = {
		{ 0,				"semaphore" },
		{ LCB_F_SPIN,			"spinlock" },
		{ LCB_F_SPIN | LCB_F_READ,	"rwlock:R" },
		{ LCB_F_SPIN | LCB_F_WRITE,	"rwlock:W"},
		{ LCB_F_READ,			"rwsem:R" },
		{ LCB_F_WRITE,			"rwsem:W" },
		{ LCB_F_RT,			"rtmutex" },
		{ LCB_F_RT | LCB_F_READ,	"rwlock-rt:R" },
		{ LCB_F_RT | LCB_F_WRITE,	"rwlock-rt:W"},
		{ LCB_F_PERCPU | LCB_F_READ,	"pcpu-sem:R" },
		{ LCB_F_PERCPU | LCB_F_WRITE,	"pcpu-sem:W" },
		{ LCB_F_MUTEX,			"mutex" },
		{ LCB_F_MUTEX | LCB_F_SPIN,	"mutex" },
	};

	for (unsigned int i = 0; i < ARRAY_SIZE(table); i++) {
		if (table[i].flags == st->flags)
			return table[i].name;
	}
	return "unknown";
}

static void sort_contention_result(void)
{
	sort_result();
}

static void print_contention_result(struct lock_contention *con)
{
	struct lock_stat *st;
	struct lock_key *key;
	int bad, total, printed;

	if (!quiet) {
		list_for_each_entry(key, &lock_keys, list)
			pr_info("%*s ", key->len, key->header);

		if (show_thread_stats)
			pr_info(" %10s %s\n\n", "pid", "comm");
		else
			pr_info(" %10s %s\n\n", "type", "caller");
	}

	bad = total = printed = 0;
	if (use_bpf)
		bad = bad_hist[BROKEN_CONTENDED];

	while ((st = pop_from_result())) {
		total += use_bpf ? st->nr_contended : 1;
		if (st->broken)
			bad++;

		list_for_each_entry(key, &lock_keys, list) {
			key->print(key, st);
			pr_info(" ");
		}

		if (show_thread_stats) {
			struct thread *t;
			int pid = st->addr;

			/* st->addr contains tid of thread */
			t = perf_session__findnew(session, pid);
			pr_info(" %10d %s\n", pid, thread__comm_str(t));
			goto next;
		}

		pr_info(" %10s %s\n", get_type_str(st), st->name);
		if (verbose) {
			struct map *kmap;
			struct symbol *sym;
			char buf[128];
			u64 ip;

			for (int i = 0; i < max_stack_depth; i++) {
				if (!st->callstack || !st->callstack[i])
					break;

				ip = st->callstack[i];
				sym = machine__find_kernel_symbol(con->machine, ip, &kmap);
				get_symbol_name_offset(kmap, sym, ip, buf, sizeof(buf));
				pr_info("\t\t\t%#lx %s\n", (unsigned long)ip, buf);
			}
		}

next:
		if (++printed >= print_nr_entries)
			break;
	}

	print_bad_events(bad, total);
}

static const struct evsel_str_handler lock_tracepoints[] = {
	{ "lock:lock_acquire",	 evsel__process_lock_acquire,   }, /* CONFIG_LOCKDEP */
	{ "lock:lock_acquired",	 evsel__process_lock_acquired,  }, /* CONFIG_LOCKDEP, CONFIG_LOCK_STAT */
	{ "lock:lock_contended", evsel__process_lock_contended, }, /* CONFIG_LOCKDEP, CONFIG_LOCK_STAT */
	{ "lock:lock_release",	 evsel__process_lock_release,   }, /* CONFIG_LOCKDEP */
};

static const struct evsel_str_handler contention_tracepoints[] = {
	{ "lock:contention_begin", evsel__process_contention_begin, },
	{ "lock:contention_end",   evsel__process_contention_end,   },
};

static bool force;
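/*
 * Entry for 'perf lock report' and 'perf lock info': open the
 * perf.data file, replay the samples through the lock state machine,
 * then either dump the requested info or sort and print the stats.
 */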
static int __cmd_report(bool display_info)
{
	int err = -EINVAL;
	struct perf_tool eops = {
		.sample		 = process_sample_event,
		.comm		 = perf_event__process_comm,
		.mmap		 = perf_event__process_mmap,
		.namespaces	 = perf_event__process_namespaces,
		.ordered_events	 = true,
	};
	struct perf_data data = {
		.path  = input_name,
		.mode  = PERF_DATA_MODE_READ,
		.force = force,
	};

	session = perf_session__new(&data, &eops);
	if (IS_ERR(session)) {
		pr_err("Initializing perf session failed\n");
		return PTR_ERR(session);
	}

	/* for lock function check */
	symbol_conf.sort_by_name = true;
	symbol__init(&session->header.env);

	if (!perf_session__has_traces(session, "lock record"))
		goto out_delete;

	if (perf_session__set_tracepoints_handlers(session, lock_tracepoints)) {
		pr_err("Initializing perf session tracepoint handlers failed\n");
		goto out_delete;
	}

	if (perf_session__set_tracepoints_handlers(session, contention_tracepoints)) {
		pr_err("Initializing perf session tracepoint handlers failed\n");
		goto out_delete;
	}

	if (setup_output_field(false, output_fields))
		goto out_delete;

	if (select_key(false))
		goto out_delete;

	if (show_thread_stats)
		aggr_mode = LOCK_AGGR_TASK;

	err = perf_session__process_events(session);
	if (err)
		goto out_delete;

	setup_pager();
	if (display_info) /* used for info subcommand */
		err = dump_info();
	else {
		combine_result();
		sort_result();
		print_result();
	}

out_delete:
	perf_session__delete(session);
	return err;
}

static void sighandler(int sig __maybe_unused)
{
}

static int __cmd_contention(int argc, const char **argv)
{
	int err = -EINVAL;
	struct perf_tool eops = {
		.sample		 = process_sample_event,
		.comm		 = perf_event__process_comm,
		.mmap		 = perf_event__process_mmap,
		.ordered_events	 = true,
	};
	struct perf_data data = {
		.path  = input_name,
		.mode  = PERF_DATA_MODE_READ,
		.force = force,
	};
	struct lock_contention con = {
		.target = &target,
		.result = &lockhash_table[0],
		.map_nr_entries = bpf_map_entries,
		.max_stack = max_stack_depth,
		.stack_skip = stack_skip,
	};

	session = perf_session__new(use_bpf ? NULL : &data, &eops);
	if (IS_ERR(session)) {
		pr_err("Initializing perf session failed\n");
		return PTR_ERR(session);
	}

	con.machine = &session->machines.host;

	/* for lock function check */
	symbol_conf.sort_by_name = true;
	symbol__init(&session->header.env);

	if (use_bpf) {
		err = target__validate(&target);
		if (err) {
			char errbuf[512];

			target__strerror(&target, err, errbuf, 512);
			pr_err("%s\n", errbuf);
			goto out_delete;
		}

		signal(SIGINT, sighandler);
		signal(SIGCHLD, sighandler);
		signal(SIGTERM, sighandler);

		con.evlist = evlist__new();
		if (con.evlist == NULL) {
			err = -ENOMEM;
			goto out_delete;
		}

		err = evlist__create_maps(con.evlist, &target);
		if (err < 0)
			goto out_delete;

		if (argc) {
			err = evlist__prepare_workload(con.evlist, &target,
						       argv, false, NULL);
			if (err < 0)
				goto out_delete;
		}

		if (lock_contention_prepare(&con) < 0) {
			pr_err("lock contention BPF setup failed\n");
			goto out_delete;
		}
	} else {
		if (!perf_session__has_traces(session, "lock record"))
			goto out_delete;

		if (!evlist__find_evsel_by_str(session->evlist,
					       "lock:contention_begin")) {
			pr_err("lock contention evsel not found\n");
			goto out_delete;
		}

		if (perf_session__set_tracepoints_handlers(session,
						contention_tracepoints)) {
			pr_err("Initializing perf session tracepoint handlers failed\n");
			goto out_delete;
		}
	}

	if (setup_output_field(true, output_fields))
		goto out_delete;

	if (select_key(true))
		goto out_delete;

	if (show_thread_stats)
		aggr_mode = LOCK_AGGR_TASK;
	else
		aggr_mode = LOCK_AGGR_CALLER;

	if (use_bpf) {
		lock_contention_start();
		if (argc)
			evlist__start_workload(con.evlist);

		/* wait for signal */
		pause();

		lock_contention_stop();
		lock_contention_read(&con);

		/* abuse bad hist stats for lost entries */
		bad_hist[BROKEN_CONTENDED] = con.lost;
	} else {
		err = perf_session__process_events(session);
		if (err)
			goto out_delete;
	}

	setup_pager();

	sort_contention_result();
	print_contention_result(&con);

out_delete:
	evlist__delete(con.evlist);
	lock_contention_finish();
	perf_session__delete(session);
	return err;
}
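/*
 * 'perf lock record' is implemented by building an argv for
 * cmd_record(): the fixed record_args, one "-e <tracepoint>" pair per
 * lock tracepoint (falling back to the contention tracepoints plus the
 * callgraph options when the lockdep events are unavailable), then the
 * user's own arguments.
 */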
static int __cmd_record(int argc, const char **argv)
{
	const char *record_args[] = {
		"record", "-R", "-m", "1024", "-c", "1", "--synth", "task",
	};
	const char *callgraph_args[] = {
		"--call-graph", "fp," __stringify(CONTENTION_STACK_DEPTH),
	};
	unsigned int rec_argc, i, j, ret;
	unsigned int nr_tracepoints;
	unsigned int nr_callgraph_args = 0;
	const char **rec_argv;
	bool has_lock_stat = true;

	for (i = 0; i < ARRAY_SIZE(lock_tracepoints); i++) {
		if (!is_valid_tracepoint(lock_tracepoints[i].name)) {
			pr_debug("tracepoint %s is not enabled. "
				 "Are CONFIG_LOCKDEP and CONFIG_LOCK_STAT enabled?\n",
				 lock_tracepoints[i].name);
			has_lock_stat = false;
			break;
		}
	}

	if (has_lock_stat)
		goto setup_args;

	for (i = 0; i < ARRAY_SIZE(contention_tracepoints); i++) {
		if (!is_valid_tracepoint(contention_tracepoints[i].name)) {
			pr_err("tracepoint %s is not enabled.\n",
			       contention_tracepoints[i].name);
			return 1;
		}
	}

	nr_callgraph_args = ARRAY_SIZE(callgraph_args);

setup_args:
	rec_argc = ARRAY_SIZE(record_args) + nr_callgraph_args + argc - 1;

	if (has_lock_stat)
		nr_tracepoints = ARRAY_SIZE(lock_tracepoints);
	else
		nr_tracepoints = ARRAY_SIZE(contention_tracepoints);

	/* factor of 2 is for -e in front of each tracepoint */
	rec_argc += 2 * nr_tracepoints;

	rec_argv = calloc(rec_argc + 1, sizeof(char *));
	if (!rec_argv)
		return -ENOMEM;

	for (i = 0; i < ARRAY_SIZE(record_args); i++)
		rec_argv[i] = strdup(record_args[i]);

	for (j = 0; j < nr_tracepoints; j++) {
		const char *ev_name;

		if (has_lock_stat)
			ev_name = strdup(lock_tracepoints[j].name);
		else
			ev_name = strdup(contention_tracepoints[j].name);

		if (!ev_name)
			return -ENOMEM;

		rec_argv[i++] = "-e";
		rec_argv[i++] = ev_name;
	}

	for (j = 0; j < nr_callgraph_args; j++, i++)
		rec_argv[i] = callgraph_args[j];

	for (j = 1; j < (unsigned int)argc; j++, i++)
		rec_argv[i] = argv[j];

	BUG_ON(i != rec_argc);

	ret = cmd_record(i, rec_argv);
	free(rec_argv);
	return ret;
}

static int parse_map_entry(const struct option *opt, const char *str,
			   int unset __maybe_unused)
{
	unsigned long *len = (unsigned long *)opt->value;
	unsigned long val;
	char *endptr;

	errno = 0;
	val = strtoul(str, &endptr, 0);
	if (*endptr != '\0' || errno != 0) {
		pr_err("invalid BPF map length: %s\n", str);
		return -1;
	}

	*len = val;
	return 0;
}

static int parse_max_stack(const struct option *opt, const char *str,
			   int unset __maybe_unused)
{
	int *len = opt->value; /* --max-stack stores into the int max_stack_depth */
	long val;
	char *endptr;

	errno = 0;
	val = strtol(str, &endptr, 0);
	if (*endptr != '\0' || errno != 0) {
		pr_err("invalid max stack depth: %s\n", str);
		return -1;
	}

	if (val < 0 || val > sysctl__max_stack()) {
		pr_err("invalid max stack depth: %ld\n", val);
		return -1;
	}

	*len = val;
	return 0;
}
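/*
 * Typical usage (see the subcommand tables below):
 *
 *   perf lock record -- <workload>    # record lock events
 *   perf lock report -k wait_total    # sort by total wait time
 *   perf lock contention -b           # live contention stats via BPF
 *   perf lock info -m                 # dump the lock instance map
 */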
int cmd_lock(int argc, const char **argv)
{
	const struct option lock_options[] = {
	OPT_STRING('i', "input", &input_name, "file", "input file name"),
	OPT_INCR('v', "verbose", &verbose, "be more verbose (show symbol address, etc)"),
	OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, "dump raw trace in ASCII"),
	OPT_BOOLEAN('f', "force", &force, "don't complain, do it"),
	OPT_STRING(0, "vmlinux", &symbol_conf.vmlinux_name,
		   "file", "vmlinux pathname"),
	OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name,
		   "file", "kallsyms pathname"),
	OPT_BOOLEAN('q', "quiet", &quiet, "Do not show any warnings or messages"),
	OPT_END()
	};

	const struct option info_options[] = {
	OPT_BOOLEAN('t', "threads", &info_threads,
		    "dump thread list in perf.data"),
	OPT_BOOLEAN('m', "map", &info_map,
		    "map of lock instances (address:name table)"),
	OPT_PARENT(lock_options)
	};

	const struct option report_options[] = {
	OPT_STRING('k', "key", &sort_key, "acquired",
		   "key for sorting (acquired / contended / avg_wait / wait_total / wait_max / wait_min)"),
	OPT_STRING('F', "field", &output_fields, NULL,
		   "output fields (acquired / contended / avg_wait / wait_total / wait_max / wait_min)"),
	/* TODO: type */
	OPT_BOOLEAN('c', "combine-locks", &combine_locks,
		    "combine locks in the same class"),
	OPT_BOOLEAN('t', "threads", &show_thread_stats,
		    "show per-thread lock stats"),
	OPT_INTEGER('E', "entries", &print_nr_entries, "display this many functions"),
	OPT_PARENT(lock_options)
	};

	struct option contention_options[] = {
	OPT_STRING('k', "key", &sort_key, "wait_total",
		   "key for sorting (contended / wait_total / wait_max / wait_min / avg_wait)"),
	OPT_STRING('F', "field", &output_fields, "contended,wait_total,wait_max,avg_wait",
		   "output fields (contended / wait_total / wait_max / wait_min / avg_wait)"),
	OPT_BOOLEAN('t', "threads", &show_thread_stats,
		    "show per-thread lock stats"),
	OPT_BOOLEAN('b', "use-bpf", &use_bpf, "use BPF program to collect lock contention stats"),
	OPT_BOOLEAN('a', "all-cpus", &target.system_wide,
		    "System-wide collection from all CPUs"),
	OPT_STRING('C', "cpu", &target.cpu_list, "cpu",
		   "List of cpus to monitor"),
	OPT_STRING('p', "pid", &target.pid, "pid",
		   "Trace on existing process id"),
	OPT_STRING(0, "tid", &target.tid, "tid",
		   "Trace on existing thread id (exclusive to --pid)"),
	OPT_CALLBACK(0, "map-nr-entries", &bpf_map_entries, "num",
		     "Max number of BPF map entries", parse_map_entry),
	OPT_CALLBACK(0, "max-stack", &max_stack_depth, "num",
		     "Set the maximum stack depth when collecting lock contention, "
		     "Default: " __stringify(CONTENTION_STACK_DEPTH), parse_max_stack),
	OPT_INTEGER(0, "stack-skip", &stack_skip,
		    "Set the number of stack frames to skip when finding a lock caller, "
		    "Default: " __stringify(CONTENTION_STACK_SKIP)),
	OPT_INTEGER('E', "entries", &print_nr_entries, "display this many functions"),
	OPT_PARENT(lock_options)
	};

	const char * const info_usage[] = {
		"perf lock info [<options>]",
		NULL
	};
	const char *const lock_subcommands[] = { "record", "report", "script",
						 "info", "contention", NULL };
	const char *lock_usage[] = {
		NULL,
		NULL
	};
	const char * const report_usage[] = {
		"perf lock report [<options>]",
		NULL
	};
	const char * const contention_usage[] = {
		"perf lock contention [<options>]",
		NULL
	};
	unsigned int i;
	int rc = 0;

	for (i = 0; i < LOCKHASH_SIZE; i++)
		INIT_HLIST_HEAD(lockhash_table + i);

	argc = parse_options_subcommand(argc, argv, lock_options, lock_subcommands,
					lock_usage, PARSE_OPT_STOP_AT_NON_OPTION);
	if (!argc)
		usage_with_options(lock_usage, lock_options);

	if (strlen(argv[0]) > 2 && strstarts("record", argv[0])) {
		return __cmd_record(argc, argv);
	} else if (strlen(argv[0]) > 2 && strstarts("report", argv[0])) {
		trace_handler = &report_lock_ops;
		if (argc) {
			argc = parse_options(argc, argv,
					     report_options, report_usage, 0);
			if (argc)
				usage_with_options(report_usage, report_options);
		}
		rc = __cmd_report(false);
	} else if (!strcmp(argv[0], "script")) {
		/* Aliased to 'perf script' */
		return cmd_script(argc, argv);
	} else if (!strcmp(argv[0], "info")) {
		if (argc) {
			argc = parse_options(argc, argv,
					     info_options, info_usage, 0);
			if (argc)
				usage_with_options(info_usage, info_options);
		}
		/* recycling report_lock_ops */
		trace_handler = &report_lock_ops;
		rc = __cmd_report(true);
	} else if (strlen(argv[0]) > 2 && strstarts("contention", argv[0])) {
		trace_handler = &contention_lock_ops;
		sort_key = "wait_total";
		output_fields = "contended,wait_total,wait_max,avg_wait";

#ifndef HAVE_BPF_SKEL
		set_option_nobuild(contention_options, 'b', "use-bpf",
				   "no BUILD_BPF_SKEL=1", false);
#endif
		if (argc) {
			argc = parse_options(argc, argv, contention_options,
					     contention_usage, 0);
		}
		rc = __cmd_contention(argc, argv);
	} else {
		usage_with_options(lock_usage, lock_options);
	}

	return rc;
}