/*
 * Copyright (c) 2014-2019, Intel Corporation
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 *  * Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *  * Neither the name of Intel Corporation nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "pt_query_decoder.h"
#include "pt_sync.h"
#include "pt_decoder_function.h"
#include "pt_packet.h"
#include "pt_packet_decoder.h"
#include "pt_config.h"
#include "pt_opcodes.h"
#include "pt_compiler.h"

#include "intel-pt.h"

#include <string.h>
#include <stddef.h>
#include <stdlib.h>
#include <limits.h>


/* Find a FUP in a PSB+ header.
 *
 * The packet @decoder must be synchronized onto the trace stream at the
 * beginning or somewhere inside a PSB+ header.
 *
 * It uses @packet to hold trace packets during its search. If the search is
 * successful, @packet will contain the first (and hopefully only) FUP packet
 * in this PSB+. Otherwise, @packet may contain anything.
 *
 * Returns one if a FUP packet is found (@packet will contain it).
 * Returns zero if no FUP packet is found (@packet is undefined).
 * Returns a negative error code otherwise.
 */
static int pt_qry_find_header_fup(struct pt_packet *packet,
				  struct pt_packet_decoder *decoder)
{
	if (!packet || !decoder)
		return -pte_internal;

	for (;;) {
		int errcode;

		errcode = pt_pkt_next(decoder, packet, sizeof(*packet));
		if (errcode < 0)
			return errcode;

		switch (packet->type) {
		default:
			/* Ignore the packet. */
			break;

		case ppt_psbend:
			/* There's no FUP in here. */
			return 0;

		case ppt_fup:
			/* Found it. */
			return 1;
		}
	}
}
int pt_qry_decoder_init(struct pt_query_decoder *decoder,
			const struct pt_config *config)
{
	int errcode;

	if (!decoder)
		return -pte_invalid;

	memset(decoder, 0, sizeof(*decoder));

	errcode = pt_config_from_user(&decoder->config, config);
	if (errcode < 0)
		return errcode;

	pt_last_ip_init(&decoder->ip);
	pt_tnt_cache_init(&decoder->tnt);
	pt_time_init(&decoder->time);
	pt_time_init(&decoder->last_time);
	pt_tcal_init(&decoder->tcal);
	pt_evq_init(&decoder->evq);

	return 0;
}

struct pt_query_decoder *pt_qry_alloc_decoder(const struct pt_config *config)
{
	struct pt_query_decoder *decoder;
	int errcode;

	decoder = malloc(sizeof(*decoder));
	if (!decoder)
		return NULL;

	errcode = pt_qry_decoder_init(decoder, config);
	if (errcode < 0) {
		free(decoder);
		return NULL;
	}

	return decoder;
}

void pt_qry_decoder_fini(struct pt_query_decoder *decoder)
{
	(void) decoder;

	/* Nothing to do. */
}

void pt_qry_free_decoder(struct pt_query_decoder *decoder)
{
	pt_qry_decoder_fini(decoder);
	free(decoder);
}
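/* For illustration only: a hedged sketch (not part of the library) of the
 * allocate/sync/free lifecycle from a caller's perspective. It assumes a
 * caller-provided @config whose begin/end pointers frame a valid trace
 * buffer; error handling is abbreviated.
 *
 *	struct pt_query_decoder *qry;
 *	uint64_t ip;
 *	int status;
 *
 *	qry = pt_qry_alloc_decoder(&config);
 *	if (!qry)
 *		return -pte_nomem;
 *
 *	status = pt_qry_sync_forward(qry, &ip);
 *	if (status >= 0) {
 *		... issue branch and event queries ...
 *	}
 *
 *	pt_qry_free_decoder(qry);
 */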
static void pt_qry_reset(struct pt_query_decoder *decoder)
{
	if (!decoder)
		return;

	decoder->enabled = 0;
	decoder->consume_packet = 0;
	decoder->event = NULL;

	pt_last_ip_init(&decoder->ip);
	pt_tnt_cache_init(&decoder->tnt);
	pt_time_init(&decoder->time);
	pt_time_init(&decoder->last_time);
	pt_tcal_init(&decoder->tcal);
	pt_evq_init(&decoder->evq);
}

static int pt_qry_will_event(const struct pt_query_decoder *decoder)
{
	const struct pt_decoder_function *dfun;

	if (!decoder)
		return -pte_internal;

	dfun = decoder->next;
	if (!dfun)
		return 0;

	if (dfun->flags & pdff_event)
		return 1;

	if (dfun->flags & pdff_psbend)
		return pt_evq_pending(&decoder->evq, evb_psbend);

	if (dfun->flags & pdff_tip)
		return pt_evq_pending(&decoder->evq, evb_tip);

	if (dfun->flags & pdff_fup)
		return pt_evq_pending(&decoder->evq, evb_fup);

	return 0;
}

static int pt_qry_will_eos(const struct pt_query_decoder *decoder)
{
	const struct pt_decoder_function *dfun;
	int errcode;

	if (!decoder)
		return -pte_internal;

	dfun = decoder->next;
	if (dfun)
		return 0;

	/* The decoding function may be NULL for two reasons:
	 *
	 *   - we ran out of trace
	 *   - we ran into a fetch error such as -pte_bad_opc
	 *
	 * Let's fetch again.
	 */
	errcode = pt_df_fetch(&dfun, decoder->pos, &decoder->config);
	return errcode == -pte_eos;
}

static int pt_qry_status_flags(const struct pt_query_decoder *decoder)
{
	int flags = 0;

	if (!decoder)
		return -pte_internal;

	/* Some packets force out TNT and any deferred TIPs in order to
	 * establish the correct context for the subsequent packet.
	 *
	 * Users are expected to first navigate to the correct code region
	 * by using up the cached TNT bits before interpreting any subsequent
	 * packets.
	 *
	 * We do need to read ahead in order to signal upcoming events. We may
	 * have already decoded those packets while our user has not navigated
	 * to the correct code region, yet.
	 *
	 * In order to have our user use up the cached TNT bits first, we do
	 * not indicate the next event until the TNT cache is empty.
	 */
	if (pt_tnt_cache_is_empty(&decoder->tnt)) {
		if (pt_qry_will_event(decoder))
			flags |= pts_event_pending;

		if (pt_qry_will_eos(decoder))
			flags |= pts_eos;
	}

	return flags;
}

static int pt_qry_provoke_fetch_error(const struct pt_query_decoder *decoder)
{
	const struct pt_decoder_function *dfun;
	int errcode;

	if (!decoder)
		return -pte_internal;

	/* Repeat the decoder fetch to reproduce the error. */
	errcode = pt_df_fetch(&dfun, decoder->pos, &decoder->config);
	if (errcode < 0)
		return errcode;

	/* We must get some error or something's wrong. */
	return -pte_internal;
}

static int pt_qry_read_ahead(struct pt_query_decoder *decoder)
{
	if (!decoder)
		return -pte_internal;

	for (;;) {
		const struct pt_decoder_function *dfun;
		int errcode;

		errcode = pt_df_fetch(&decoder->next, decoder->pos,
				      &decoder->config);
		if (errcode)
			return errcode;

		dfun = decoder->next;
		if (!dfun)
			return -pte_internal;

		if (!dfun->decode)
			return -pte_internal;

		/* We're done once we reach
		 *
		 *   - a branching related packet. */
		if (dfun->flags & (pdff_tip | pdff_tnt))
			return 0;

		/*   - an event related packet. */
		if (pt_qry_will_event(decoder))
			return 0;

		/* Decode status update packets. */
		errcode = dfun->decode(decoder);
		if (errcode) {
			/* Ignore truncated status packets at the end.
			 *
			 * Move beyond the packet and clear @decoder->next to
			 * indicate that we were not able to fetch the next
			 * packet.
			 */
			if (errcode == -pte_eos) {
				decoder->pos = decoder->config.end;
				decoder->next = NULL;
			}

			return errcode;
		}
	}
}

static int pt_qry_start(struct pt_query_decoder *decoder, const uint8_t *pos,
			uint64_t *addr)
{
	const struct pt_decoder_function *dfun;
	int status, errcode;

	if (!decoder || !pos)
		return -pte_invalid;

	pt_qry_reset(decoder);

	decoder->sync = pos;
	decoder->pos = pos;

	errcode = pt_df_fetch(&decoder->next, pos, &decoder->config);
	if (errcode)
		return errcode;

	dfun = decoder->next;

	/* We do need to start at a PSB in order to initialize the state. */
	if (dfun != &pt_decode_psb)
		return -pte_nosync;

	/* Decode the PSB+ header to initialize the state. */
	errcode = dfun->decode(decoder);
	if (errcode < 0)
		return errcode;

	/* Fill in the start address.
	 *
	 * We do this before reading ahead since the latter may read an
	 * adjacent PSB+ that might change the decoder's IP, causing us
	 * to skip code.
	 */
	if (addr) {
		status = pt_last_ip_query(addr, &decoder->ip);

		/* Make sure we don't clobber it later on. */
		if (!status)
			addr = NULL;
	}

	/* Read ahead until the first query-relevant packet. */
	errcode = pt_qry_read_ahead(decoder);
	if (errcode < 0)
		return errcode;

	/* We return the current decoder status. */
	status = pt_qry_status_flags(decoder);
	if (status < 0)
		return status;

	errcode = pt_last_ip_query(addr, &decoder->ip);
	if (errcode < 0) {
		/* Indicate the missing IP in the status. */
		if (addr)
			status |= pts_ip_suppressed;
	}

	return status;
}
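/* For illustration only: a hedged sketch of how a caller might act on the
 * status bits that pt_qry_start() and the query functions return. The
 * pts_* flags are the public libipt status flags.
 *
 *	if (status & pts_ip_suppressed)
 *		... do not use the returned IP; it is not valid ...
 *
 *	if (status & pts_event_pending)
 *		... drain events via pt_qry_event() before querying on ...
 *
 *	if (status & pts_eos)
 *		... the decoder is at the end of the trace ...
 */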
static int pt_qry_apply_tsc(struct pt_time *time, struct pt_time_cal *tcal,
			    const struct pt_packet_tsc *packet,
			    const struct pt_config *config)
{
	int errcode;

	/* We ignore configuration errors. They will result in imprecise
	 * calibration which will result in imprecise cycle-accurate timing.
	 *
	 * We currently do not track them.
	 */
	errcode = pt_tcal_update_tsc(tcal, packet, config);
	if (errcode < 0 && (errcode != -pte_bad_config))
		return errcode;

	/* We ignore configuration errors. They will result in imprecise
	 * timing and are tracked as packet losses in struct pt_time.
	 */
	errcode = pt_time_update_tsc(time, packet, config);
	if (errcode < 0 && (errcode != -pte_bad_config))
		return errcode;

	return 0;
}

static int pt_qry_apply_header_tsc(struct pt_time *time,
				   struct pt_time_cal *tcal,
				   const struct pt_packet_tsc *packet,
				   const struct pt_config *config)
{
	int errcode;

	/* We ignore configuration errors. They will result in imprecise
	 * calibration which will result in imprecise cycle-accurate timing.
	 *
	 * We currently do not track them.
	 */
	errcode = pt_tcal_header_tsc(tcal, packet, config);
	if (errcode < 0 && (errcode != -pte_bad_config))
		return errcode;

	/* We ignore configuration errors. They will result in imprecise
	 * timing and are tracked as packet losses in struct pt_time.
	 */
	errcode = pt_time_update_tsc(time, packet, config);
	if (errcode < 0 && (errcode != -pte_bad_config))
		return errcode;

	return 0;
}

static int pt_qry_apply_cbr(struct pt_time *time, struct pt_time_cal *tcal,
			    const struct pt_packet_cbr *packet,
			    const struct pt_config *config)
{
	int errcode;

	/* We ignore configuration errors. They will result in imprecise
	 * calibration which will result in imprecise cycle-accurate timing.
	 *
	 * We currently do not track them.
	 */
	errcode = pt_tcal_update_cbr(tcal, packet, config);
	if (errcode < 0 && (errcode != -pte_bad_config))
		return errcode;

	/* We ignore configuration errors. They will result in imprecise
	 * timing and are tracked as packet losses in struct pt_time.
	 */
	errcode = pt_time_update_cbr(time, packet, config);
	if (errcode < 0 && (errcode != -pte_bad_config))
		return errcode;

	return 0;
}

static int pt_qry_apply_header_cbr(struct pt_time *time,
				   struct pt_time_cal *tcal,
				   const struct pt_packet_cbr *packet,
				   const struct pt_config *config)
{
	int errcode;

	/* We ignore configuration errors. They will result in imprecise
	 * calibration which will result in imprecise cycle-accurate timing.
	 *
	 * We currently do not track them.
	 */
	errcode = pt_tcal_header_cbr(tcal, packet, config);
	if (errcode < 0 && (errcode != -pte_bad_config))
		return errcode;

	/* We ignore configuration errors. They will result in imprecise
	 * timing and are tracked as packet losses in struct pt_time.
	 */
	errcode = pt_time_update_cbr(time, packet, config);
	if (errcode < 0 && (errcode != -pte_bad_config))
		return errcode;

	return 0;
}
static int pt_qry_apply_tma(struct pt_time *time, struct pt_time_cal *tcal,
			    const struct pt_packet_tma *packet,
			    const struct pt_config *config)
{
	int errcode;

	/* We ignore configuration errors. They will result in imprecise
	 * calibration which will result in imprecise cycle-accurate timing.
	 *
	 * We currently do not track them.
	 */
	errcode = pt_tcal_update_tma(tcal, packet, config);
	if (errcode < 0 && (errcode != -pte_bad_config))
		return errcode;

	/* We ignore configuration errors. They will result in imprecise
	 * timing and are tracked as packet losses in struct pt_time.
	 */
	errcode = pt_time_update_tma(time, packet, config);
	if (errcode < 0 && (errcode != -pte_bad_config))
		return errcode;

	return 0;
}

static int pt_qry_apply_mtc(struct pt_time *time, struct pt_time_cal *tcal,
			    const struct pt_packet_mtc *packet,
			    const struct pt_config *config)
{
	int errcode;

	/* We ignore configuration errors. They will result in imprecise
	 * calibration which will result in imprecise cycle-accurate timing.
	 *
	 * We currently do not track them.
	 */
	errcode = pt_tcal_update_mtc(tcal, packet, config);
	if (errcode < 0 && (errcode != -pte_bad_config))
		return errcode;

	/* We ignore configuration errors. They will result in imprecise
	 * timing and are tracked as packet losses in struct pt_time.
	 */
	errcode = pt_time_update_mtc(time, packet, config);
	if (errcode < 0 && (errcode != -pte_bad_config))
		return errcode;

	return 0;
}

static int pt_qry_apply_cyc(struct pt_time *time, struct pt_time_cal *tcal,
			    const struct pt_packet_cyc *packet,
			    const struct pt_config *config)
{
	uint64_t fcr;
	int errcode;

	/* We ignore configuration errors. They will result in imprecise
	 * calibration which will result in imprecise cycle-accurate timing.
	 *
	 * We currently do not track them.
	 */
	errcode = pt_tcal_update_cyc(tcal, packet, config);
	if (errcode < 0 && (errcode != -pte_bad_config))
		return errcode;

	/* We need the FastCounter to Cycles ratio below. Fall back to
	 * an invalid ratio of 0 if calibration has not kicked in, yet.
	 *
	 * This will be tracked as packet loss in struct pt_time.
	 */
	errcode = pt_tcal_fcr(&fcr, tcal);
	if (errcode < 0) {
		if (errcode == -pte_no_time)
			fcr = 0ull;
		else
			return errcode;
	}

	/* We ignore configuration errors. They will result in imprecise
	 * timing and are tracked as packet losses in struct pt_time.
	 */
	errcode = pt_time_update_cyc(time, packet, config, fcr);
	if (errcode < 0 && (errcode != -pte_bad_config))
		return errcode;

	return 0;
}

int pt_qry_sync_forward(struct pt_query_decoder *decoder, uint64_t *ip)
{
	const uint8_t *pos, *sync, *begin;
	ptrdiff_t space;
	int errcode;

	if (!decoder)
		return -pte_invalid;

	begin = decoder->config.begin;
	sync = decoder->sync;
	pos = decoder->pos;
	if (!pos)
		pos = begin;

	if (pos == sync)
		pos += ptps_psb;

	if (pos < begin)
		return -pte_internal;

	/* Start a bit earlier so we find a PSB that has been partially
	 * consumed by a preceding packet.
	 */
	space = pos - begin;
	if (ptps_psb <= space)
		space = ptps_psb - 1;

	pos -= space;

	errcode = pt_sync_forward(&sync, pos, &decoder->config);
	if (errcode < 0)
		return errcode;

	return pt_qry_start(decoder, sync, ip);
}
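/* For illustration only: a hedged sketch of enumerating all trace segments
 * by repeatedly synchronizing forward. @qry is a caller-provided query
 * decoder; -pte_eos terminates the loop.
 *
 *	for (;;) {
 *		uint64_t ip;
 *		int status;
 *
 *		status = pt_qry_sync_forward(qry, &ip);
 *		if (status < 0) {
 *			if (status == -pte_eos)
 *				break;
 *
 *			return status;
 *		}
 *
 *		... decode this segment ...
 *	}
 */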
int pt_qry_sync_backward(struct pt_query_decoder *decoder, uint64_t *ip)
{
	const uint8_t *start, *sync;
	int errcode;

	if (!decoder)
		return -pte_invalid;

	start = decoder->pos;
	if (!start)
		start = decoder->config.end;

	sync = start;
	for (;;) {
		errcode = pt_sync_backward(&sync, sync, &decoder->config);
		if (errcode < 0)
			return errcode;

		errcode = pt_qry_start(decoder, sync, ip);
		if (errcode < 0) {
			/* Ignore incomplete trace segments at the end. We
			 * need a full PSB+ to start decoding.
			 */
			if (errcode == -pte_eos)
				continue;

			return errcode;
		}

		/* An empty trace segment in the middle of the trace might
		 * bring us back to where we started.
		 *
		 * We're done when we reached a new position.
		 */
		if (decoder->pos != start)
			break;
	}

	return 0;
}

int pt_qry_sync_set(struct pt_query_decoder *decoder, uint64_t *ip,
		    uint64_t offset)
{
	const uint8_t *sync, *pos;
	int errcode;

	if (!decoder)
		return -pte_invalid;

	pos = decoder->config.begin + offset;

	errcode = pt_sync_set(&sync, pos, &decoder->config);
	if (errcode < 0)
		return errcode;

	return pt_qry_start(decoder, sync, ip);
}

int pt_qry_get_offset(const struct pt_query_decoder *decoder, uint64_t *offset)
{
	const uint8_t *begin, *pos;

	if (!decoder || !offset)
		return -pte_invalid;

	begin = decoder->config.begin;
	pos = decoder->pos;

	if (!pos)
		return -pte_nosync;

	*offset = (uint64_t) (int64_t) (pos - begin);
	return 0;
}

int pt_qry_get_sync_offset(const struct pt_query_decoder *decoder,
			   uint64_t *offset)
{
	const uint8_t *begin, *sync;

	if (!decoder || !offset)
		return -pte_invalid;

	begin = decoder->config.begin;
	sync = decoder->sync;

	if (!sync)
		return -pte_nosync;

	*offset = (uint64_t) (int64_t) (sync - begin);
	return 0;
}

const struct pt_config *
pt_qry_get_config(const struct pt_query_decoder *decoder)
{
	if (!decoder)
		return NULL;

	return &decoder->config;
}
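/* For illustration only: a hedged sketch of re-synchronizing at a previously
 * visited PSB by combining pt_qry_get_sync_offset() and pt_qry_sync_set(),
 * e.g. to re-decode a segment. @qry is a caller-provided query decoder.
 *
 *	uint64_t sync_offset, ip;
 *	int errcode, status;
 *
 *	errcode = pt_qry_get_sync_offset(qry, &sync_offset);
 *	if (errcode < 0)
 *		return errcode;
 *
 *	... decode, then later ...
 *
 *	status = pt_qry_sync_set(qry, &ip, sync_offset);
 */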
static int pt_qry_cache_tnt(struct pt_query_decoder *decoder)
{
	int errcode;

	if (!decoder)
		return -pte_internal;

	for (;;) {
		const struct pt_decoder_function *dfun;

		dfun = decoder->next;
		if (!dfun)
			return pt_qry_provoke_fetch_error(decoder);

		if (!dfun->decode)
			return -pte_internal;

		/* There's an event ahead of us. */
		if (pt_qry_will_event(decoder))
			return -pte_bad_query;

		/* Diagnose a TIP that has not been part of an event. */
		if (dfun->flags & pdff_tip)
			return -pte_bad_query;

		/* Clear the decoder's current event so we know when we
		 * accidentally skipped an event.
		 */
		decoder->event = NULL;

		/* Apply the decoder function. */
		errcode = dfun->decode(decoder);
		if (errcode)
			return errcode;

		/* If we skipped an event, we're in trouble. */
		if (decoder->event)
			return -pte_event_ignored;

		/* We're done when we decoded a TNT packet. */
		if (dfun->flags & pdff_tnt)
			break;

		/* Read ahead until the next query-relevant packet. */
		errcode = pt_qry_read_ahead(decoder);
		if (errcode)
			return errcode;
	}

	/* Preserve the time at the TNT packet. */
	decoder->last_time = decoder->time;

	/* Read ahead until the next query-relevant packet. */
	errcode = pt_qry_read_ahead(decoder);
	if ((errcode < 0) && (errcode != -pte_eos))
		return errcode;

	return 0;
}

int pt_qry_cond_branch(struct pt_query_decoder *decoder, int *taken)
{
	int errcode, query;

	if (!decoder || !taken)
		return -pte_invalid;

	/* We cache the latest tnt packet in the decoder. Let's re-fill the
	 * cache in case it is empty.
	 */
	if (pt_tnt_cache_is_empty(&decoder->tnt)) {
		errcode = pt_qry_cache_tnt(decoder);
		if (errcode < 0)
			return errcode;
	}

	query = pt_tnt_cache_query(&decoder->tnt);
	if (query < 0)
		return query;

	*taken = query;

	return pt_qry_status_flags(decoder);
}

int pt_qry_indirect_branch(struct pt_query_decoder *decoder, uint64_t *addr)
{
	int errcode, flags;

	if (!decoder || !addr)
		return -pte_invalid;

	flags = 0;
	for (;;) {
		const struct pt_decoder_function *dfun;

		dfun = decoder->next;
		if (!dfun)
			return pt_qry_provoke_fetch_error(decoder);

		if (!dfun->decode)
			return -pte_internal;

		/* There's an event ahead of us. */
		if (pt_qry_will_event(decoder))
			return -pte_bad_query;

		/* Clear the decoder's current event so we know when we
		 * accidentally skipped an event.
		 */
		decoder->event = NULL;

		/* We may see a single TNT packet if the current tnt is empty.
		 *
		 * If we see a TNT while the current tnt is not empty, it
		 * means that our user got out of sync. Let's report no data
		 * and hope that our user is able to re-sync.
		 */
		if ((dfun->flags & pdff_tnt) &&
		    !pt_tnt_cache_is_empty(&decoder->tnt))
			return -pte_bad_query;

		/* Apply the decoder function. */
		errcode = dfun->decode(decoder);
		if (errcode)
			return errcode;

		/* If we skipped an event, we're in trouble. */
		if (decoder->event)
			return -pte_event_ignored;

		/* We're done when we found a TIP packet that isn't part of an
		 * event.
		 */
		if (dfun->flags & pdff_tip) {
			uint64_t ip;

			/* We already decoded it, so the branch destination
			 * is stored in the decoder's last ip.
			 */
			errcode = pt_last_ip_query(&ip, &decoder->ip);
			if (errcode < 0)
				flags |= pts_ip_suppressed;
			else
				*addr = ip;

			break;
		}

		/* Read ahead until the next query-relevant packet. */
		errcode = pt_qry_read_ahead(decoder);
		if (errcode)
			return errcode;
	}

	/* Preserve the time at the TIP packet. */
	decoder->last_time = decoder->time;

	/* Read ahead until the next query-relevant packet. */
	errcode = pt_qry_read_ahead(decoder);
	if ((errcode < 0) && (errcode != -pte_eos))
		return errcode;

	flags |= pt_qry_status_flags(decoder);

	return flags;
}
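/* For illustration only: a hedged sketch of a caller's query loop. Real
 * callers know from the instructions they decode whether the next branch is
 * conditional or indirect; this sketch merely tries each query type in turn
 * on -pte_bad_query, falling back to pt_qry_event() last.
 *
 *	for (;;) {
 *		uint64_t addr;
 *		int taken, status;
 *
 *		status = pt_qry_cond_branch(qry, &taken);
 *		if (status < 0) {
 *			status = pt_qry_indirect_branch(qry, &addr);
 *			if (status < 0) {
 *				struct pt_event event;
 *
 *				status = pt_qry_event(qry, &event,
 *						      sizeof(event));
 *				if (status < 0)
 *					return status;
 *			}
 *		}
 *
 *		if (status & pts_eos)
 *			break;
 *	}
 */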
int pt_qry_event(struct pt_query_decoder *decoder, struct pt_event *event,
		 size_t size)
{
	int errcode, flags;

	if (!decoder || !event)
		return -pte_invalid;

	if (size < offsetof(struct pt_event, variant))
		return -pte_invalid;

	/* We do not allow querying for events while there are still TNT
	 * bits to consume.
	 */
	if (!pt_tnt_cache_is_empty(&decoder->tnt))
		return -pte_bad_query;

	/* Do not provide more than we actually have. */
	if (sizeof(*event) < size)
		size = sizeof(*event);

	flags = 0;
	for (;;) {
		const struct pt_decoder_function *dfun;

		dfun = decoder->next;
		if (!dfun)
			return pt_qry_provoke_fetch_error(decoder);

		if (!dfun->decode)
			return -pte_internal;

		/* We must not see a TIP or TNT packet unless it belongs
		 * to an event.
		 *
		 * If we see one, it means that our user got out of sync.
		 * Let's report no data and hope that our user is able
		 * to re-sync.
		 */
		if ((dfun->flags & (pdff_tip | pdff_tnt)) &&
		    !pt_qry_will_event(decoder))
			return -pte_bad_query;

		/* Clear the decoder's current event so we know when decoding
		 * produces a new event.
		 */
		decoder->event = NULL;

		/* Apply any other decoder function. */
		errcode = dfun->decode(decoder);
		if (errcode)
			return errcode;

		/* Check if there has been an event.
		 *
		 * Some packets may result in events in some but not in all
		 * configurations.
		 */
		if (decoder->event) {
			(void) memcpy(event, decoder->event, size);
			break;
		}

		/* Read ahead until the next query-relevant packet. */
		errcode = pt_qry_read_ahead(decoder);
		if (errcode)
			return errcode;
	}

	/* Preserve the time at the event. */
	decoder->last_time = decoder->time;

	/* Read ahead until the next query-relevant packet. */
	errcode = pt_qry_read_ahead(decoder);
	if ((errcode < 0) && (errcode != -pte_eos))
		return errcode;

	flags |= pt_qry_status_flags(decoder);

	return flags;
}

int pt_qry_time(struct pt_query_decoder *decoder, uint64_t *time,
		uint32_t *lost_mtc, uint32_t *lost_cyc)
{
	if (!decoder || !time)
		return -pte_invalid;

	return pt_time_query_tsc(time, lost_mtc, lost_cyc,
				 &decoder->last_time);
}

int pt_qry_core_bus_ratio(struct pt_query_decoder *decoder, uint32_t *cbr)
{
	if (!decoder || !cbr)
		return -pte_invalid;

	return pt_time_query_cbr(cbr, &decoder->last_time);
}

static int pt_qry_event_time(struct pt_event *event,
			     const struct pt_query_decoder *decoder)
{
	int errcode;

	if (!event || !decoder)
		return -pte_internal;

	errcode = pt_time_query_tsc(&event->tsc, &event->lost_mtc,
				    &event->lost_cyc, &decoder->time);
	if (errcode < 0) {
		if (errcode != -pte_no_time)
			return errcode;
	} else
		event->has_tsc = 1;

	return 0;
}

int pt_qry_decode_unknown(struct pt_query_decoder *decoder)
{
	struct pt_packet packet;
	int size;

	if (!decoder)
		return -pte_internal;

	size = pt_pkt_read_unknown(&packet, decoder->pos, &decoder->config);
	if (size < 0)
		return size;

	decoder->pos += size;
	return 0;
}

int pt_qry_decode_pad(struct pt_query_decoder *decoder)
{
	if (!decoder)
		return -pte_internal;

	decoder->pos += ptps_pad;

	return 0;
}
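/* For illustration only: a hedged sketch of draining pending events when a
 * query reported pts_event_pending, and of reading the event's timing.
 *
 *	while (status & pts_event_pending) {
 *		struct pt_event event;
 *
 *		status = pt_qry_event(qry, &event, sizeof(event));
 *		if (status < 0)
 *			return status;
 *
 *		if (event.has_tsc)
 *			... event.tsc holds the event's timestamp ...
 *
 *		... handle event.type, e.g. ptev_enabled, ptev_disabled ...
 *	}
 */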
static int pt_qry_read_psb_header(struct pt_query_decoder *decoder)
{
	if (!decoder)
		return -pte_internal;

	pt_last_ip_init(&decoder->ip);

	for (;;) {
		const struct pt_decoder_function *dfun;
		int errcode;

		errcode = pt_df_fetch(&decoder->next, decoder->pos,
				      &decoder->config);
		if (errcode)
			return errcode;

		dfun = decoder->next;
		if (!dfun)
			return -pte_internal;

		/* We're done once we reach a psbend packet. */
		if (dfun->flags & pdff_psbend)
			return 0;

		if (!dfun->header)
			return -pte_bad_context;

		errcode = dfun->header(decoder);
		if (errcode)
			return errcode;
	}
}

int pt_qry_decode_psb(struct pt_query_decoder *decoder)
{
	const uint8_t *pos;
	int size, errcode;

	if (!decoder)
		return -pte_internal;

	pos = decoder->pos;

	size = pt_pkt_read_psb(pos, &decoder->config);
	if (size < 0)
		return size;

	errcode = pt_tcal_update_psb(&decoder->tcal, &decoder->config);
	if (errcode < 0)
		return errcode;

	decoder->pos += size;

	errcode = pt_qry_read_psb_header(decoder);
	if (errcode < 0) {
		/* Move back to the PSB so we have a chance to recover and
		 * continue decoding.
		 */
		decoder->pos = pos;

		/* Clear any PSB+ events that have already been queued. */
		(void) pt_evq_clear(&decoder->evq, evb_psbend);

		/* Reset the decoder's decode function. */
		decoder->next = &pt_decode_psb;

		return errcode;
	}

	/* The next packet following the PSB header will be of type PSBEND.
	 *
	 * Decoding this packet will publish the PSB events that have been
	 * accumulated while reading the PSB header.
	 */
	return 0;
}

static int pt_qry_event_ip(uint64_t *ip, struct pt_event *event,
			   const struct pt_query_decoder *decoder)
{
	int errcode;

	if (!decoder)
		return -pte_internal;

	errcode = pt_last_ip_query(ip, &decoder->ip);
	if (errcode < 0) {
		switch (pt_errcode(errcode)) {
		case pte_noip:
		case pte_ip_suppressed:
			event->ip_suppressed = 1;
			break;

		default:
			return errcode;
		}
	}

	return 0;
}

/* Decode a generic IP packet.
 *
 * Returns the number of bytes read, on success.
 * Returns -pte_eos if the ip does not fit into the buffer.
 * Returns -pte_bad_packet if the ip compression is not known.
 */
static int pt_qry_decode_ip(struct pt_query_decoder *decoder)
{
	struct pt_packet_ip packet;
	int errcode, size;

	if (!decoder)
		return -pte_internal;

	size = pt_pkt_read_ip(&packet, decoder->pos, &decoder->config);
	if (size < 0)
		return size;

	errcode = pt_last_ip_update_ip(&decoder->ip, &packet,
				       &decoder->config);
	if (errcode < 0)
		return errcode;

	/* We do not update the decoder's position, yet. */

	return size;
}
static int pt_qry_consume_tip(struct pt_query_decoder *decoder, int size)
{
	if (!decoder)
		return -pte_internal;

	decoder->pos += size;
	return 0;
}

static int pt_qry_event_tip(struct pt_event *ev,
			    struct pt_query_decoder *decoder)
{
	if (!ev || !decoder)
		return -pte_internal;

	switch (ev->type) {
	case ptev_async_branch:
		decoder->consume_packet = 1;

		return pt_qry_event_ip(&ev->variant.async_branch.to, ev,
				       decoder);

	case ptev_async_paging:
		return pt_qry_event_ip(&ev->variant.async_paging.ip, ev,
				       decoder);

	case ptev_async_vmcs:
		return pt_qry_event_ip(&ev->variant.async_vmcs.ip, ev,
				       decoder);

	case ptev_exec_mode:
		return pt_qry_event_ip(&ev->variant.exec_mode.ip, ev,
				       decoder);

	default:
		break;
	}

	return -pte_bad_context;
}

int pt_qry_decode_tip(struct pt_query_decoder *decoder)
{
	struct pt_event *ev;
	int size, errcode;

	if (!decoder)
		return -pte_internal;

	size = pt_qry_decode_ip(decoder);
	if (size < 0)
		return size;

	/* Process any pending events binding to TIP. */
	ev = pt_evq_dequeue(&decoder->evq, evb_tip);
	if (ev) {
		errcode = pt_qry_event_tip(ev, decoder);
		if (errcode < 0)
			return errcode;

		/* Publish the event. */
		decoder->event = ev;

		/* Process further pending events. */
		if (pt_evq_pending(&decoder->evq, evb_tip))
			return 0;

		/* No further events.
		 *
		 * If none of the events consumed the packet, we're done.
		 */
		if (!decoder->consume_packet)
			return 0;

		/* We're done with this packet. Clear the flag we set
		 * previously and consume it.
		 */
		decoder->consume_packet = 0;
	}

	return pt_qry_consume_tip(decoder, size);
}
int pt_qry_decode_tnt_8(struct pt_query_decoder *decoder)
{
	struct pt_packet_tnt packet;
	int size, errcode;

	if (!decoder)
		return -pte_internal;

	size = pt_pkt_read_tnt_8(&packet, decoder->pos, &decoder->config);
	if (size < 0)
		return size;

	errcode = pt_tnt_cache_update_tnt(&decoder->tnt, &packet,
					  &decoder->config);
	if (errcode < 0)
		return errcode;

	decoder->pos += size;
	return 0;
}

int pt_qry_decode_tnt_64(struct pt_query_decoder *decoder)
{
	struct pt_packet_tnt packet;
	int size, errcode;

	if (!decoder)
		return -pte_internal;

	size = pt_pkt_read_tnt_64(&packet, decoder->pos, &decoder->config);
	if (size < 0)
		return size;

	errcode = pt_tnt_cache_update_tnt(&decoder->tnt, &packet,
					  &decoder->config);
	if (errcode < 0)
		return errcode;

	decoder->pos += size;
	return 0;
}
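/* For illustration only: a hedged sketch of the TNT-8 payload layout as
 * specified in the Intel SDM (the actual extraction is done by
 * pt_pkt_read_tnt_8() and the bits are consumed via the TNT cache): bit 0 of
 * the raw byte is the opcode bit, the highest set bit above it is the stop
 * bit, and the taken/not-taken bits sit below the stop bit with the oldest
 * branch first.
 *
 *	uint8_t payload = raw >> 1;
 *	int stop, bit, taken;
 *
 *	for (stop = 6; stop > 0; stop -= 1)
 *		if ((payload >> stop) & 1)
 *			break;
 *
 *	for (bit = stop - 1; bit >= 0; bit -= 1)
 *		taken = (payload >> bit) & 1;
 */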
static int pt_qry_consume_tip_pge(struct pt_query_decoder *decoder, int size)
{
	if (!decoder)
		return -pte_internal;

	decoder->pos += size;
	return 0;
}

static int pt_qry_event_tip_pge(struct pt_event *ev,
				const struct pt_query_decoder *decoder)
{
	if (!ev)
		return -pte_internal;

	switch (ev->type) {
	case ptev_exec_mode:
		return pt_qry_event_ip(&ev->variant.exec_mode.ip, ev, decoder);

	default:
		break;
	}

	return -pte_bad_context;
}

int pt_qry_decode_tip_pge(struct pt_query_decoder *decoder)
{
	struct pt_event *ev;
	int size, errcode;

	if (!decoder)
		return -pte_internal;

	size = pt_qry_decode_ip(decoder);
	if (size < 0)
		return size;

	/* We send the enable event first. This is more convenient for our
	 * users and does not require them to either store or blindly apply
	 * other events that might be pending.
	 *
	 * We use the consume packet decoder flag to indicate this.
	 */
	if (!decoder->consume_packet) {
		/* This packet signals a standalone enabled event. */
		ev = pt_evq_standalone(&decoder->evq);
		if (!ev)
			return -pte_internal;

		ev->type = ptev_enabled;

		/* We can't afford having a suppressed IP here. */
		errcode = pt_last_ip_query(&ev->variant.enabled.ip,
					   &decoder->ip);
		if (errcode < 0)
			return -pte_bad_packet;

		errcode = pt_qry_event_time(ev, decoder);
		if (errcode < 0)
			return errcode;

		/* Discard any cached TNT bits.
		 *
		 * They should have been consumed at the corresponding disable
		 * event. If they have not, for whatever reason, discard them
		 * now so our user does not get out of sync.
		 */
		pt_tnt_cache_init(&decoder->tnt);

		/* Process pending events next. */
		decoder->consume_packet = 1;
		decoder->enabled = 1;
	} else {
		/* Process any pending events binding to TIP. */
		ev = pt_evq_dequeue(&decoder->evq, evb_tip);
		if (ev) {
			errcode = pt_qry_event_tip_pge(ev, decoder);
			if (errcode < 0)
				return errcode;
		}
	}

	/* We must have an event. Either the initial enable event or one of
	 * the queued events.
	 */
	if (!ev)
		return -pte_internal;

	/* Publish the event. */
	decoder->event = ev;

	/* Process further pending events. */
	if (pt_evq_pending(&decoder->evq, evb_tip))
		return 0;

	/* We must consume the packet. */
	if (!decoder->consume_packet)
		return -pte_internal;

	decoder->consume_packet = 0;

	return pt_qry_consume_tip_pge(decoder, size);
}

static int pt_qry_consume_tip_pgd(struct pt_query_decoder *decoder, int size)
{
	if (!decoder)
		return -pte_internal;

	decoder->enabled = 0;
	decoder->pos += size;
	return 0;
}

static int pt_qry_event_tip_pgd(struct pt_event *ev,
				const struct pt_query_decoder *decoder)
{
	if (!ev)
		return -pte_internal;

	switch (ev->type) {
	case ptev_async_branch: {
		uint64_t at;

		/* Turn the async branch into an async disable. */
		at = ev->variant.async_branch.from;

		ev->type = ptev_async_disabled;
		ev->variant.async_disabled.at = at;

		return pt_qry_event_ip(&ev->variant.async_disabled.ip, ev,
				       decoder);
	}

	case ptev_async_paging:
	case ptev_async_vmcs:
	case ptev_exec_mode:
		/* These events are ordered after the async disable event. It
		 * is not quite clear what IP to give them.
		 *
		 * If we give them the async disable's source IP, we'd make an
		 * error if the IP is updated when applying the async disable
		 * event.
		 *
		 * If we give them the async disable's destination IP, we'd
		 * make an error if the IP is not updated when applying the
		 * async disable event. That's what our decoders do since
		 * tracing is likely to resume from there.
		 *
		 * In all cases, tracing will be disabled when those events
		 * are applied, so we may as well suppress the IP.
		 */
		ev->ip_suppressed = 1;

		return 0;

	default:
		break;
	}

	return -pte_bad_context;
}

int pt_qry_decode_tip_pgd(struct pt_query_decoder *decoder)
{
	struct pt_event *ev;
	int size, errcode;

	if (!decoder)
		return -pte_internal;

	size = pt_qry_decode_ip(decoder);
	if (size < 0)
		return size;

	/* Process any pending events binding to TIP. */
	ev = pt_evq_dequeue(&decoder->evq, evb_tip);
	if (ev) {
		errcode = pt_qry_event_tip_pgd(ev, decoder);
		if (errcode < 0)
			return errcode;
	} else {
		/* This packet signals a standalone disabled event. */
		ev = pt_evq_standalone(&decoder->evq);
		if (!ev)
			return -pte_internal;
		ev->type = ptev_disabled;

		errcode = pt_qry_event_ip(&ev->variant.disabled.ip, ev,
					  decoder);
		if (errcode < 0)
			return errcode;

		errcode = pt_qry_event_time(ev, decoder);
		if (errcode < 0)
			return errcode;
	}

	/* We must have an event. Either the initial disabled event or one of
	 * the queued events.
	 */
	if (!ev)
		return -pte_internal;

	/* Publish the event. */
	decoder->event = ev;

	/* Process further pending events. */
	if (pt_evq_pending(&decoder->evq, evb_tip))
		return 0;

	return pt_qry_consume_tip_pgd(decoder, size);
}
static int pt_qry_consume_fup(struct pt_query_decoder *decoder, int size)
{
	if (!decoder)
		return -pte_internal;

	decoder->pos += size;
	return 0;
}

static int scan_for_erratum_bdm70(struct pt_packet_decoder *decoder)
{
	for (;;) {
		struct pt_packet packet;
		int errcode;

		errcode = pt_pkt_next(decoder, &packet, sizeof(packet));
		if (errcode < 0) {
			/* Running out of packets is not an error. */
			if (errcode == -pte_eos)
				errcode = 0;

			return errcode;
		}

		switch (packet.type) {
		default:
			/* All other packets cancel our search.
			 *
			 * We do not enumerate those packets since we also
			 * want to include new packets.
			 */
			return 0;

		case ppt_tip_pge:
			/* We found it - the erratum applies. */
			return 1;

		case ppt_pad:
		case ppt_tsc:
		case ppt_cbr:
		case ppt_psbend:
		case ppt_pip:
		case ppt_mode:
		case ppt_vmcs:
		case ppt_tma:
		case ppt_mtc:
		case ppt_cyc:
		case ppt_mnt:
			/* Intentionally skip a few packets. */
			continue;
		}
	}
}

static int check_erratum_bdm70(const uint8_t *pos,
			       const struct pt_config *config)
{
	struct pt_packet_decoder decoder;
	int errcode;

	if (!pos || !config)
		return -pte_internal;

	errcode = pt_pkt_decoder_init(&decoder, config);
	if (errcode < 0)
		return errcode;

	errcode = pt_pkt_sync_set(&decoder, (uint64_t) (pos - config->begin));
	if (errcode >= 0)
		errcode = scan_for_erratum_bdm70(&decoder);

	pt_pkt_decoder_fini(&decoder);
	return errcode;
}

int pt_qry_header_fup(struct pt_query_decoder *decoder)
{
	struct pt_packet_ip packet;
	int errcode, size;

	if (!decoder)
		return -pte_internal;

	size = pt_pkt_read_ip(&packet, decoder->pos, &decoder->config);
	if (size < 0)
		return size;

	if (decoder->config.errata.bdm70 && !decoder->enabled) {
		errcode = check_erratum_bdm70(decoder->pos + size,
					      &decoder->config);
		if (errcode < 0)
			return errcode;

		if (errcode)
			return pt_qry_consume_fup(decoder, size);
	}

	errcode = pt_last_ip_update_ip(&decoder->ip, &packet,
				       &decoder->config);
	if (errcode < 0)
		return errcode;

	/* Tracing is enabled if we have an IP in the header. */
	if (packet.ipc != pt_ipc_suppressed)
		decoder->enabled = 1;

	return pt_qry_consume_fup(decoder, size);
}
static int pt_qry_event_fup(struct pt_event *ev,
			    struct pt_query_decoder *decoder)
{
	if (!ev || !decoder)
		return -pte_internal;

	switch (ev->type) {
	case ptev_overflow:
		decoder->consume_packet = 1;

		/* We can't afford having a suppressed IP here. */
		return pt_last_ip_query(&ev->variant.overflow.ip,
					&decoder->ip);

	case ptev_tsx:
		if (!(ev->variant.tsx.aborted))
			decoder->consume_packet = 1;

		return pt_qry_event_ip(&ev->variant.tsx.ip, ev, decoder);

	case ptev_exstop:
		decoder->consume_packet = 1;

		return pt_qry_event_ip(&ev->variant.exstop.ip, ev, decoder);

	case ptev_mwait:
		decoder->consume_packet = 1;

		return pt_qry_event_ip(&ev->variant.mwait.ip, ev, decoder);

	case ptev_ptwrite:
		decoder->consume_packet = 1;

		return pt_qry_event_ip(&ev->variant.ptwrite.ip, ev, decoder);

	default:
		break;
	}

	return -pte_internal;
}

int pt_qry_decode_fup(struct pt_query_decoder *decoder)
{
	struct pt_event *ev;
	int size, errcode;

	if (!decoder)
		return -pte_internal;

	size = pt_qry_decode_ip(decoder);
	if (size < 0)
		return size;

	/* Process any pending events binding to FUP. */
	ev = pt_evq_dequeue(&decoder->evq, evb_fup);
	if (ev) {
		errcode = pt_qry_event_fup(ev, decoder);
		if (errcode < 0)
			return errcode;

		/* Publish the event. */
		decoder->event = ev;

		/* Process further pending events. */
		if (pt_evq_pending(&decoder->evq, evb_fup))
			return 0;

		/* No further events.
		 *
		 * If none of the events consumed the packet, we're done.
		 */
		if (!decoder->consume_packet)
			return 0;

		/* We're done with this packet. Clear the flag we set
		 * previously and consume it.
		 */
		decoder->consume_packet = 0;
	} else {
		/* FUP indicates an async branch event; it binds to TIP.
		 *
		 * We do need an IP in this case.
		 */
		uint64_t ip;

		errcode = pt_last_ip_query(&ip, &decoder->ip);
		if (errcode < 0)
			return errcode;

		ev = pt_evq_enqueue(&decoder->evq, evb_tip);
		if (!ev)
			return -pte_nomem;

		ev->type = ptev_async_branch;
		ev->variant.async_branch.from = ip;

		errcode = pt_qry_event_time(ev, decoder);
		if (errcode < 0)
			return errcode;
	}

	return pt_qry_consume_fup(decoder, size);
}
int pt_qry_decode_pip(struct pt_query_decoder *decoder)
{
	struct pt_packet_pip packet;
	struct pt_event *event;
	int size, errcode;

	if (!decoder)
		return -pte_internal;

	size = pt_pkt_read_pip(&packet, decoder->pos, &decoder->config);
	if (size < 0)
		return size;

	/* Paging events are either standalone or bind to the same TIP packet
	 * as an in-flight async branch event.
	 */
	event = pt_evq_find(&decoder->evq, evb_tip, ptev_async_branch);
	if (!event) {
		event = pt_evq_standalone(&decoder->evq);
		if (!event)
			return -pte_internal;
		event->type = ptev_paging;
		event->variant.paging.cr3 = packet.cr3;
		event->variant.paging.non_root = packet.nr;

		decoder->event = event;
	} else {
		event = pt_evq_enqueue(&decoder->evq, evb_tip);
		if (!event)
			return -pte_nomem;

		event->type = ptev_async_paging;
		event->variant.async_paging.cr3 = packet.cr3;
		event->variant.async_paging.non_root = packet.nr;
	}

	errcode = pt_qry_event_time(event, decoder);
	if (errcode < 0)
		return errcode;

	decoder->pos += size;
	return 0;
}

int pt_qry_header_pip(struct pt_query_decoder *decoder)
{
	struct pt_packet_pip packet;
	struct pt_event *event;
	int size;

	if (!decoder)
		return -pte_internal;

	size = pt_pkt_read_pip(&packet, decoder->pos, &decoder->config);
	if (size < 0)
		return size;

	/* Paging events are reported at the end of the PSB. */
	event = pt_evq_enqueue(&decoder->evq, evb_psbend);
	if (!event)
		return -pte_nomem;

	event->type = ptev_async_paging;
	event->variant.async_paging.cr3 = packet.cr3;
	event->variant.async_paging.non_root = packet.nr;

	decoder->pos += size;
	return 0;
}

static int pt_qry_event_psbend(struct pt_event *ev,
			       struct pt_query_decoder *decoder)
{
	int errcode;

	if (!ev || !decoder)
		return -pte_internal;

	/* PSB+ events are status updates. */
	ev->status_update = 1;

	errcode = pt_qry_event_time(ev, decoder);
	if (errcode < 0)
		return errcode;

	switch (ev->type) {
	case ptev_async_paging:
		return pt_qry_event_ip(&ev->variant.async_paging.ip, ev,
				       decoder);

	case ptev_exec_mode:
		return pt_qry_event_ip(&ev->variant.exec_mode.ip, ev, decoder);

	case ptev_tsx:
		return pt_qry_event_ip(&ev->variant.tsx.ip, ev, decoder);

	case ptev_async_vmcs:
		return pt_qry_event_ip(&ev->variant.async_vmcs.ip, ev,
				       decoder);

	case ptev_cbr:
		return 0;

	case ptev_mnt:
		/* Maintenance packets may appear anywhere. Do not mark them
		 * as status updates even if they appear in PSB+.
		 */
		ev->status_update = 0;
		return 0;

	default:
		break;
	}

	return -pte_internal;
}

static int pt_qry_process_pending_psb_events(struct pt_query_decoder *decoder)
{
	struct pt_event *ev;
	int errcode;

	if (!decoder)
		return -pte_internal;

	ev = pt_evq_dequeue(&decoder->evq, evb_psbend);
	if (!ev)
		return 0;

	errcode = pt_qry_event_psbend(ev, decoder);
	if (errcode < 0)
		return errcode;

	/* Publish the event. */
	decoder->event = ev;

	/* Signal a pending event. */
	return 1;
}
1839 */ 1840 static int pt_qry_event_ovf_disabled(struct pt_query_decoder *decoder) 1841 { 1842 struct pt_event *ev; 1843 1844 if (!decoder) 1845 return -pte_internal; 1846 1847 ev = pt_evq_standalone(&decoder->evq); 1848 if (!ev) 1849 return -pte_internal; 1850 1851 ev->type = ptev_overflow; 1852 1853 /* We suppress the IP to indicate that tracing has been disabled before 1854 * the overflow resolved. There can be several events before tracing is 1855 * enabled again. 1856 */ 1857 ev->ip_suppressed = 1; 1858 1859 decoder->enabled = 0; 1860 decoder->event = ev; 1861 1862 return pt_qry_event_time(ev, decoder); 1863 } 1864 1865 /* Queues an overflow event with tracing enabled. 1866 * 1867 * Creates and enqueues the event and enables tracing in @decoder. 1868 * 1869 * Returns zero on success, a negative pt_error_code otherwise. 1870 */ 1871 static int pt_qry_event_ovf_enabled(struct pt_query_decoder *decoder) 1872 { 1873 struct pt_event *ev; 1874 1875 if (!decoder) 1876 return -pte_internal; 1877 1878 ev = pt_evq_enqueue(&decoder->evq, evb_fup); 1879 if (!ev) 1880 return -pte_internal; 1881 1882 ev->type = ptev_overflow; 1883 1884 decoder->enabled = 1; 1885 1886 return pt_qry_event_time(ev, decoder); 1887 } 1888 1889 /* Recover from SKD010. 1890 * 1891 * Creates and publishes an overflow event at @packet's IP payload. 1892 * 1893 * Further updates @decoder as follows: 1894 * 1895 * - set time tracking to @time and @tcal 1896 * - set the position to @offset 1897 * - set ip to @packet's IP payload 1898 * - set tracing to be enabled 1899 * 1900 * Returns zero on success, a negative error code otherwise. 1901 */ 1902 static int skd010_recover(struct pt_query_decoder *decoder, 1903 const struct pt_packet_ip *packet, 1904 const struct pt_time_cal *tcal, 1905 const struct pt_time *time, uint64_t offset) 1906 { 1907 struct pt_last_ip ip; 1908 struct pt_event *ev; 1909 int errcode; 1910 1911 if (!decoder || !packet || !tcal || !time) 1912 return -pte_internal; 1913 1914 /* We use the decoder's IP. It should be newly initialized. */ 1915 ip = decoder->ip; 1916 1917 /* Extract the IP payload from the packet. */ 1918 errcode = pt_last_ip_update_ip(&ip, packet, &decoder->config); 1919 if (errcode < 0) 1920 return errcode; 1921 1922 /* Synthesize the overflow event. */ 1923 ev = pt_evq_standalone(&decoder->evq); 1924 if (!ev) 1925 return -pte_internal; 1926 1927 ev->type = ptev_overflow; 1928 1929 /* We do need a full IP. */ 1930 errcode = pt_last_ip_query(&ev->variant.overflow.ip, &ip); 1931 if (errcode < 0) 1932 return -pte_bad_context; 1933 1934 /* We continue decoding at the given offset. */ 1935 decoder->pos = decoder->config.begin + offset; 1936 1937 /* Tracing is enabled. */ 1938 decoder->enabled = 1; 1939 decoder->ip = ip; 1940 1941 decoder->time = *time; 1942 decoder->tcal = *tcal; 1943 1944 /* Publish the event. */ 1945 decoder->event = ev; 1946 1947 return pt_qry_event_time(ev, decoder); 1948 } 1949 1950 /* Recover from SKD010 with tracing disabled. 1951 * 1952 * Creates and publishes a standalone overflow event. 1953 * 1954 * Further updates @decoder as follows: 1955 * 1956 * - set time tracking to @time and @tcal 1957 * - set the position to @offset 1958 * - set tracing to be disabled 1959 * 1960 * Returns zero on success, a negative error code otherwise. 
1961 */ 1962 static int skd010_recover_disabled(struct pt_query_decoder *decoder, 1963 const struct pt_time_cal *tcal, 1964 const struct pt_time *time, uint64_t offset) 1965 { 1966 if (!decoder || !tcal || !time) 1967 return -pte_internal; 1968 1969 decoder->time = *time; 1970 decoder->tcal = *tcal; 1971 1972 /* We continue decoding at the given offset. */ 1973 decoder->pos = decoder->config.begin + offset; 1974 1975 return pt_qry_event_ovf_disabled(decoder); 1976 } 1977 1978 /* Scan ahead for a packet at which to resume after an overflow. 1979 * 1980 * This function is called after an OVF without a corresponding FUP. This 1981 * normally means that the overflow resolved while tracing was disabled. 1982 * 1983 * With erratum SKD010 it might also mean that the FUP (or TIP.PGE) was dropped. 1984 * The overflow thus resolved while tracing was enabled (or tracing was enabled 1985 * after the overflow resolved). Search for an indication whether tracing is 1986 * enabled or disabled by scanning upcoming packets. 1987 * 1988 * If we can confirm that tracing is disabled, the erratum does not apply and we 1989 * can continue normally. 1990 * 1991 * If we can confirm that tracing is enabled, the erratum applies and we try to 1992 * recover by synchronizing at a later packet and a different IP. If we can't 1993 * recover, pretend the erratum didn't apply so we run into the error later. 1994 * Since this assumes that tracing is disabled, no harm should be done, i.e. no 1995 * bad trace should be generated. 1996 * 1997 * Returns zero if the overflow is handled. 1998 * Returns a positive value if the overflow is not yet handled. 1999 * Returns a negative error code otherwise. 2000 */ 2001 static int skd010_scan_for_ovf_resume(struct pt_packet_decoder *pkt, 2002 struct pt_query_decoder *decoder) 2003 { 2004 struct pt_time_cal tcal; 2005 struct pt_time time; 2006 struct { 2007 struct pt_time_cal tcal; 2008 struct pt_time time; 2009 uint64_t offset; 2010 } mode_tsx; 2011 int errcode; 2012 2013 if (!decoder) 2014 return -pte_internal; 2015 2016 /* Keep track of time as we skip packets. */ 2017 time = decoder->time; 2018 tcal = decoder->tcal; 2019 2020 /* Keep track of a potential recovery point at MODE.TSX. */ 2021 memset(&mode_tsx, 0, sizeof(mode_tsx)); 2022 2023 for (;;) { 2024 struct pt_packet packet; 2025 uint64_t offset; 2026 2027 errcode = pt_pkt_get_offset(pkt, &offset); 2028 if (errcode < 0) 2029 return errcode; 2030 2031 errcode = pt_pkt_next(pkt, &packet, sizeof(packet)); 2032 if (errcode < 0) { 2033 /* Let's assume the trace is correct if we run out 2034 * of packets. 2035 */ 2036 if (errcode == -pte_eos) 2037 errcode = 1; 2038 2039 return errcode; 2040 } 2041 2042 switch (packet.type) { 2043 case ppt_tip_pge: 2044 /* Everything is fine. There is nothing to do. */ 2045 return 1; 2046 2047 case ppt_tip_pgd: 2048 /* This is a clear indication that the erratum 2049 * applies. 2050 * 2051 * We synchronize after the disable. 2052 */ 2053 return skd010_recover_disabled(decoder, &tcal, &time, 2054 offset + packet.size); 2055 2056 case ppt_tnt_8: 2057 case ppt_tnt_64: 2058 /* This is a clear indication that the erratum 2059 * apllies. 2060 * 2061 * Yet, we can't recover from it as we wouldn't know how 2062 * many TNT bits will have been used when we eventually 2063 * find an IP packet at which to resume tracing. 
2064 */ 2065 return 1; 2066 2067 case ppt_pip: 2068 case ppt_vmcs: 2069 /* We could track those changes and synthesize extra 2070 * events after the overflow event when recovering from 2071 * the erratum. This requires infrastructure that we 2072 * don't currently have, though, so we're not going to 2073 * do it. 2074 * 2075 * Instead, we ignore those changes. We already don't 2076 * know how many other changes were lost in the 2077 * overflow. 2078 */ 2079 break; 2080 2081 case ppt_mode: 2082 switch (packet.payload.mode.leaf) { 2083 case pt_mol_exec: 2084 /* A MODE.EXEC packet binds to TIP, i.e. 2085 * 2086 * TIP.PGE: everything is fine 2087 * TIP: the erratum applies 2088 * 2089 * In the TIP.PGE case, we may just follow the 2090 * normal code flow. 2091 * 2092 * In the TIP case, we'd be able to re-sync at 2093 * the TIP IP but have to skip packets up to and 2094 * including the TIP. 2095 * 2096 * We'd need to synthesize the MODE.EXEC event 2097 * after the overflow event when recovering at 2098 * the TIP. We lack the infrastructure for this 2099 * - it's getting too complicated. 2100 * 2101 * Instead, we ignore the execution mode change; 2102 * we already don't know how many more such 2103 * changes were lost in the overflow. 2104 */ 2105 break; 2106 2107 case pt_mol_tsx: 2108 /* A MODE.TSX packet may be standalone or bind 2109 * to FUP. 2110 * 2111 * If this is the second MODE.TSX, we're sure 2112 * that tracing is disabled and everything is 2113 * fine. 2114 */ 2115 if (mode_tsx.offset) 2116 return 1; 2117 2118 /* If we find the FUP this packet binds to, we 2119 * may recover at the FUP IP and restart 2120 * processing packets from here. Remember the 2121 * current state. 2122 */ 2123 mode_tsx.offset = offset; 2124 mode_tsx.time = time; 2125 mode_tsx.tcal = tcal; 2126 2127 break; 2128 } 2129 2130 break; 2131 2132 case ppt_fup: 2133 /* This is a pretty good indication that tracing 2134 * is indeed enabled and the erratum applies. 2135 */ 2136 2137 /* If we got a MODE.TSX packet before, we synchronize at 2138 * the FUP IP but continue decoding packets starting 2139 * from the MODE.TSX. 2140 */ 2141 if (mode_tsx.offset) 2142 return skd010_recover(decoder, 2143 &packet.payload.ip, 2144 &mode_tsx.tcal, 2145 &mode_tsx.time, 2146 mode_tsx.offset); 2147 2148 /* Without a preceding MODE.TSX, this FUP is the start 2149 * of an async branch or disable. We synchronize at the 2150 * FUP IP and continue decoding packets from here. 2151 */ 2152 return skd010_recover(decoder, &packet.payload.ip, 2153 &tcal, &time, offset); 2154 2155 case ppt_tip: 2156 /* We syhchronize at the TIP IP and continue decoding 2157 * packets after the TIP packet. 2158 */ 2159 return skd010_recover(decoder, &packet.payload.ip, 2160 &tcal, &time, 2161 offset + packet.size); 2162 2163 case ppt_psb: 2164 /* We reached a synchronization point. Tracing is 2165 * enabled if and only if the PSB+ contains a FUP. 2166 */ 2167 errcode = pt_qry_find_header_fup(&packet, pkt); 2168 if (errcode < 0) { 2169 /* If we ran out of packets, we can't tell. 2170 * Let's assume the trace is correct. 2171 */ 2172 if (errcode == -pte_eos) 2173 errcode = 1; 2174 2175 return errcode; 2176 } 2177 2178 /* If there is no FUP, tracing is disabled and 2179 * everything is fine. 2180 */ 2181 if (!errcode) 2182 return 1; 2183 2184 /* We should have a FUP. */ 2185 if (packet.type != ppt_fup) 2186 return -pte_internal; 2187 2188 /* Otherwise, we may synchronize at the FUP IP and 2189 * continue decoding packets at the PSB. 
			 */
			return skd010_recover(decoder, &packet.payload.ip,
					      &tcal, &time, offset);

		case ppt_psbend:
			/* We shouldn't see this. */
			return -pte_bad_context;

		case ppt_ovf:
		case ppt_stop:
			/* It doesn't matter if it had been enabled or disabled
			 * before. We may resume normally.
			 */
			return 1;

		case ppt_unknown:
		case ppt_invalid:
			/* We can't skip this packet. */
			return 1;

		case ppt_pad:
		case ppt_mnt:
		case ppt_pwre:
		case ppt_pwrx:
			/* Ignore this packet. */
			break;

		case ppt_exstop:
			/* We may skip a stand-alone EXSTOP. */
			if (!packet.payload.exstop.ip)
				break;

			fallthrough;
		case ppt_mwait:
			/* To skip this packet, we'd need to take care of the
			 * FUP it binds to. This is getting complicated.
			 */
			return 1;

		case ppt_ptw:
			/* We may skip a stand-alone PTW. */
			if (!packet.payload.ptw.ip)
				break;

			/* To skip this packet, we'd need to take care of the
			 * FUP it binds to. This is getting complicated.
			 */
			return 1;

		case ppt_tsc:
			/* Keep track of time. */
			errcode = pt_qry_apply_tsc(&time, &tcal,
						   &packet.payload.tsc,
						   &decoder->config);
			if (errcode < 0)
				return errcode;

			break;

		case ppt_cbr:
			/* Keep track of time. */
			errcode = pt_qry_apply_cbr(&time, &tcal,
						   &packet.payload.cbr,
						   &decoder->config);
			if (errcode < 0)
				return errcode;

			break;

		case ppt_tma:
			/* Keep track of time. */
			errcode = pt_qry_apply_tma(&time, &tcal,
						   &packet.payload.tma,
						   &decoder->config);
			if (errcode < 0)
				return errcode;

			break;

		case ppt_mtc:
			/* Keep track of time. */
			errcode = pt_qry_apply_mtc(&time, &tcal,
						   &packet.payload.mtc,
						   &decoder->config);
			if (errcode < 0)
				return errcode;

			break;

		case ppt_cyc:
			/* Keep track of time. */
			errcode = pt_qry_apply_cyc(&time, &tcal,
						   &packet.payload.cyc,
						   &decoder->config);
			if (errcode < 0)
				return errcode;

			break;
		}
	}
}

static int pt_qry_handle_skd010(struct pt_query_decoder *decoder)
{
	struct pt_packet_decoder pkt;
	uint64_t offset;
	int errcode;

	if (!decoder)
		return -pte_internal;

	errcode = pt_qry_get_offset(decoder, &offset);
	if (errcode < 0)
		return errcode;

	errcode = pt_pkt_decoder_init(&pkt, &decoder->config);
	if (errcode < 0)
		return errcode;

	errcode = pt_pkt_sync_set(&pkt, offset);
	if (errcode >= 0)
		errcode = skd010_scan_for_ovf_resume(&pkt, decoder);

	pt_pkt_decoder_fini(&pkt);
	return errcode;
}
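
/* For illustration, consider two packet sequences following an OVF that has
 * no FUP (a sketch; the sequences are hypothetical and real traces will
 * typically contain more packets):
 *
 *   OVF, CBR, TIP.PGE(ip)   tracing is enabled again after the overflow;
 *                           the trace is consistent and the erratum does
 *                           not apply.
 *
 *   OVF, CBR, TIP.PGD(ip)   a disable although tracing should already be
 *                           disabled; the FUP must have been dropped, so the
 *                           erratum applies and we resume after the TIP.PGD
 *                           with tracing disabled.
 *
 * This is the classification done in skd010_scan_for_ovf_resume() above; the
 * full function additionally tracks time and remembers a potential MODE.TSX
 * recovery point.
 */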

/* Scan ahead for an indication whether tracing is enabled or disabled.
 *
 * Returns zero if tracing is clearly disabled.
 * Returns a positive integer if tracing is enabled or if we can't tell.
 * Returns a negative error code otherwise.
 */
static int apl12_tracing_is_disabled(struct pt_packet_decoder *decoder)
{
	if (!decoder)
		return -pte_internal;

	for (;;) {
		struct pt_packet packet;
		int status;

		status = pt_pkt_next(decoder, &packet, sizeof(packet));
		if (status < 0) {
			/* Running out of packets is not an error. */
			if (status == -pte_eos)
				status = 1;

			return status;
		}

		switch (packet.type) {
		default:
			/* Skip other packets. */
			break;

		case ppt_stop:
			/* Tracing is disabled before a stop. */
			return 0;

		case ppt_tip_pge:
			/* Tracing gets enabled - it must have been disabled. */
			return 0;

		case ppt_tnt_8:
		case ppt_tnt_64:
		case ppt_tip:
		case ppt_tip_pgd:
			/* Those packets are only generated when tracing is
			 * enabled. We're done.
			 */
			return 1;

		case ppt_psb:
			/* We reached a synchronization point. Tracing is
			 * enabled if and only if the PSB+ contains a FUP.
			 */
			status = pt_qry_find_header_fup(&packet, decoder);

			/* If we ran out of packets, we can't tell. */
			if (status == -pte_eos)
				status = 1;

			return status;

		case ppt_psbend:
			/* We shouldn't see this. */
			return -pte_bad_context;

		case ppt_ovf:
			/* It doesn't matter - we run into the next overflow. */
			return 1;

		case ppt_unknown:
		case ppt_invalid:
			/* We can't skip this packet. */
			return 1;
		}
	}
}

/* Apply workaround for erratum APL12.
 *
 * We resume from @offset (relative to @decoder->pos) with tracing disabled. On
 * our way to the resume location we process packets to update our state.
 *
 * Any event will be dropped.
 *
 * Returns zero on success, a negative pt_error_code otherwise.
 */
static int apl12_resume_disabled(struct pt_query_decoder *decoder,
				 struct pt_packet_decoder *pkt,
				 unsigned int offset)
{
	uint64_t begin, end;
	int errcode;

	if (!decoder)
		return -pte_internal;

	errcode = pt_qry_get_offset(decoder, &begin);
	if (errcode < 0)
		return errcode;

	errcode = pt_pkt_sync_set(pkt, begin);
	if (errcode < 0)
		return errcode;

	end = begin + offset;
	for (;;) {
		struct pt_packet packet;
		uint64_t next;

		errcode = pt_pkt_next(pkt, &packet, sizeof(packet));
		if (errcode < 0) {
			/* Running out of packets is not an error. */
			if (errcode == -pte_eos)
				errcode = 0;

			return errcode;
		}

		/* The offset is the start of the next packet. */
		errcode = pt_pkt_get_offset(pkt, &next);
		if (errcode < 0)
			return errcode;

		/* We're done when we reach @offset.
		 *
		 * The current @packet will be the FUP after which we started
		 * our search. We skip it.
		 *
		 * Check that we're not accidentally proceeding past @offset.
		 */
		if (end <= next) {
			if (end < next)
				return -pte_internal;

			break;
		}

		switch (packet.type) {
		default:
			/* Skip other packets. */
			break;

		case ppt_mode:
		case ppt_pip:
		case ppt_vmcs:
			/* We should not encounter those.
			 *
			 * There are many packets we should not encounter, but
			 * these are state-relevant; let's check for them
			 * explicitly.
			 */
			return -pte_internal;

		case ppt_tsc:
			/* Keep track of time. */
			errcode = pt_qry_apply_tsc(&decoder->time,
						   &decoder->tcal,
						   &packet.payload.tsc,
						   &decoder->config);
			if (errcode < 0)
				return errcode;

			break;

		case ppt_cbr:
			/* Keep track of time. */
			errcode = pt_qry_apply_cbr(&decoder->time,
						   &decoder->tcal,
						   &packet.payload.cbr,
						   &decoder->config);
			if (errcode < 0)
				return errcode;

			break;

		case ppt_tma:
			/* Keep track of time. */
			errcode = pt_qry_apply_tma(&decoder->time,
						   &decoder->tcal,
						   &packet.payload.tma,
						   &decoder->config);
			if (errcode < 0)
				return errcode;

			break;

		case ppt_mtc:
			/* Keep track of time. */
			errcode = pt_qry_apply_mtc(&decoder->time,
						   &decoder->tcal,
						   &packet.payload.mtc,
						   &decoder->config);
			if (errcode < 0)
				return errcode;

			break;

		case ppt_cyc:
			/* Keep track of time. */
			errcode = pt_qry_apply_cyc(&decoder->time,
						   &decoder->tcal,
						   &packet.payload.cyc,
						   &decoder->config);
			if (errcode < 0)
				return errcode;

			break;
		}
	}

	decoder->pos += offset;

	return pt_qry_event_ovf_disabled(decoder);
}

/* Handle erratum APL12.
 *
 * This function is called when a FUP is found after an OVF. The @offset
 * argument gives the relative offset from @decoder->pos to after the FUP.
 *
 * A FUP after OVF normally indicates that the overflow resolved while tracing
 * is enabled. Due to erratum APL12, however, the overflow may have resolved
 * while tracing is disabled and still generate a FUP.
 *
 * We scan ahead for an indication whether tracing is actually disabled. If we
 * find one, the erratum applies and we proceed from after the FUP packet.
 *
 * This will drop any CBR or MTC events. We will update @decoder's timing state
 * on CBR but drop the event.
 *
 * Returns zero if the erratum was handled.
 * Returns a positive integer if the erratum was not handled.
 * Returns a negative pt_error_code otherwise.
 */
static int pt_qry_handle_apl12(struct pt_query_decoder *decoder,
			       unsigned int offset)
{
	struct pt_packet_decoder pkt;
	uint64_t here;
	int status;

	if (!decoder)
		return -pte_internal;

	status = pt_qry_get_offset(decoder, &here);
	if (status < 0)
		return status;

	status = pt_pkt_decoder_init(&pkt, &decoder->config);
	if (status < 0)
		return status;

	status = pt_pkt_sync_set(&pkt, here + offset);
	if (status >= 0) {
		status = apl12_tracing_is_disabled(&pkt);
		if (!status)
			status = apl12_resume_disabled(decoder, &pkt, offset);
	}

	pt_pkt_decoder_fini(&pkt);
	return status;
}
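
/* A sketch of the APL12 case handled above, using a hypothetical sequence:
 *
 *   OVF, FUP(ip), PAD, TIP.PGE(ip')
 *
 * The FUP suggests that the overflow resolved while tracing was enabled, but
 * the subsequent TIP.PGE shows that tracing was actually disabled. With the
 * apl12 erratum flag set, apl12_tracing_is_disabled() spots the TIP.PGE,
 * apl12_resume_disabled() replays any timing packets between the OVF and the
 * end of the FUP (none in this example), and decoding resumes after the FUP
 * with a disabled-overflow event.
 */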

/* Apply workaround for erratum APL11.
 *
 * We search for a TIP.PGD and, if we found one, resume from after that packet
 * with tracing disabled. On our way to the resume location we process packets
 * to update our state.
 *
 * If we don't find a TIP.PGD but instead some other packet that indicates that
 * tracing is disabled, indicate that the erratum does not apply.
 *
 * Any event will be dropped.
 *
 * Returns zero if the erratum was handled.
 * Returns a positive integer if the erratum was not handled.
 * Returns a negative pt_error_code otherwise.
 */
static int apl11_apply(struct pt_query_decoder *decoder,
		       struct pt_packet_decoder *pkt)
{
	struct pt_time_cal tcal;
	struct pt_time time;

	if (!decoder)
		return -pte_internal;

	time = decoder->time;
	tcal = decoder->tcal;
	for (;;) {
		struct pt_packet packet;
		int errcode;

		errcode = pt_pkt_next(pkt, &packet, sizeof(packet));
		if (errcode < 0)
			return errcode;

		switch (packet.type) {
		case ppt_tip_pgd: {
			uint64_t offset;

			/* We found a TIP.PGD. The erratum applies.
			 *
			 * Resume from here with tracing disabled.
			 */
			errcode = pt_pkt_get_offset(pkt, &offset);
			if (errcode < 0)
				return errcode;

			decoder->time = time;
			decoder->tcal = tcal;
			decoder->pos = decoder->config.begin + offset;

			return pt_qry_event_ovf_disabled(decoder);
		}

		case ppt_invalid:
			return -pte_bad_opc;

		case ppt_fup:
		case ppt_psb:
		case ppt_tip_pge:
		case ppt_stop:
		case ppt_ovf:
		case ppt_mode:
		case ppt_pip:
		case ppt_vmcs:
		case ppt_exstop:
		case ppt_mwait:
		case ppt_pwre:
		case ppt_pwrx:
		case ppt_ptw:
			/* The erratum does not apply. */
			return 1;

		case ppt_unknown:
		case ppt_pad:
		case ppt_mnt:
			/* Skip those packets. */
			break;

		case ppt_psbend:
		case ppt_tip:
		case ppt_tnt_8:
		case ppt_tnt_64:
			return -pte_bad_context;

		case ppt_tsc:
			/* Keep track of time. */
			errcode = pt_qry_apply_tsc(&time, &tcal,
						   &packet.payload.tsc,
						   &decoder->config);
			if (errcode < 0)
				return errcode;

			break;

		case ppt_cbr:
			/* Keep track of time. */
			errcode = pt_qry_apply_cbr(&time, &tcal,
						   &packet.payload.cbr,
						   &decoder->config);
			if (errcode < 0)
				return errcode;

			break;

		case ppt_tma:
			/* Keep track of time. */
			errcode = pt_qry_apply_tma(&time, &tcal,
						   &packet.payload.tma,
						   &decoder->config);
			if (errcode < 0)
				return errcode;

			break;

		case ppt_mtc:
			/* Keep track of time. */
			errcode = pt_qry_apply_mtc(&time, &tcal,
						   &packet.payload.mtc,
						   &decoder->config);
			if (errcode < 0)
				return errcode;

			break;

		case ppt_cyc:
			/* Keep track of time. */
			errcode = pt_qry_apply_cyc(&time, &tcal,
						   &packet.payload.cyc,
						   &decoder->config);
			if (errcode < 0)
				return errcode;

			break;
		}
	}
}

/* Handle erratum APL11.
 *
 * This function is called when we diagnose a bad packet while searching for a
 * FUP after an OVF.
 *
 * Due to erratum APL11 we may get an extra TIP.PGD after the OVF. Find that
 * TIP.PGD and resume from there with tracing disabled.
 *
 * This will drop any CBR or MTC events. We will update @decoder's timing state
 * on CBR but drop the event.
 *
 * Returns zero if the erratum was handled.
 * Returns a positive integer if the erratum was not handled.
 * Returns a negative pt_error_code otherwise.
 */
static int pt_qry_handle_apl11(struct pt_query_decoder *decoder)
{
	struct pt_packet_decoder pkt;
	uint64_t offset;
	int status;

	if (!decoder)
		return -pte_internal;

	status = pt_qry_get_offset(decoder, &offset);
	if (status < 0)
		return status;

	status = pt_pkt_decoder_init(&pkt, &decoder->config);
	if (status < 0)
		return status;

	status = pt_pkt_sync_set(&pkt, offset);
	if (status >= 0)
		status = apl11_apply(decoder, &pkt);

	pt_pkt_decoder_fini(&pkt);
	return status;
}

static int pt_pkt_find_ovf_fup(struct pt_packet_decoder *decoder)
{
	for (;;) {
		struct pt_packet packet;
		int errcode;

		errcode = pt_pkt_next(decoder, &packet, sizeof(packet));
		if (errcode < 0)
			return errcode;

		switch (packet.type) {
		case ppt_fup:
			return 1;

		case ppt_invalid:
			return -pte_bad_opc;

		case ppt_unknown:
		case ppt_pad:
		case ppt_mnt:
		case ppt_cbr:
		case ppt_tsc:
		case ppt_tma:
		case ppt_mtc:
		case ppt_cyc:
			continue;

		case ppt_psb:
		case ppt_tip_pge:
		case ppt_mode:
		case ppt_pip:
		case ppt_vmcs:
		case ppt_stop:
		case ppt_ovf:
		case ppt_exstop:
		case ppt_mwait:
		case ppt_pwre:
		case ppt_pwrx:
		case ppt_ptw:
			return 0;

		case ppt_psbend:
		case ppt_tip:
		case ppt_tip_pgd:
		case ppt_tnt_8:
		case ppt_tnt_64:
			return -pte_bad_context;
		}
	}
}

/* Find a FUP to which the current OVF may bind.
 *
 * Scans the trace for a FUP or for a packet that indicates that tracing is
 * disabled.
 *
 * Returns the relative offset of the packet following the found FUP on
 * success.
 * Returns zero if no FUP is found and tracing is assumed to be disabled.
 * Returns a negative pt_error_code otherwise.
 */
static int pt_qry_find_ovf_fup(const struct pt_query_decoder *decoder)
{
	struct pt_packet_decoder pkt;
	uint64_t begin, end, offset;
	int status;

	if (!decoder)
		return -pte_internal;

	status = pt_qry_get_offset(decoder, &begin);
	if (status < 0)
		return status;

	status = pt_pkt_decoder_init(&pkt, &decoder->config);
	if (status < 0)
		return status;

	status = pt_pkt_sync_set(&pkt, begin);
	if (status >= 0) {
		status = pt_pkt_find_ovf_fup(&pkt);
		if (status > 0) {
			status = pt_pkt_get_offset(&pkt, &end);
			if (status < 0)
				return status;

			if (end <= begin)
				return -pte_overflow;

			offset = end - begin;
			if (INT_MAX < offset)
				return -pte_overflow;

			status = (int) offset;
		}
	}

	pt_pkt_decoder_fini(&pkt);
	return status;
}
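
/* A small worked example for the offset computation above (the layout and
 * packet sizes are hypothetical):
 *
 *   0x100  OVF     (2 bytes)   pt_qry_decode_ovf() consumes the OVF before
 *                              searching, so @begin is 0x102.
 *   0x102  FUP.ip  (7 bytes)   pt_pkt_find_ovf_fup() returns 1 with the
 *                              packet decoder at 0x109, so @end is 0x109.
 *
 * The function then returns @end - @begin = 7, the relative offset of the
 * packet following the FUP.
 */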

int pt_qry_decode_ovf(struct pt_query_decoder *decoder)
{
	struct pt_time_cal tcal;
	struct pt_time time;
	int status, offset;

	if (!decoder)
		return -pte_internal;

	status = pt_qry_process_pending_psb_events(decoder);
	if (status < 0)
		return status;

	/* If we have any pending psbend events, we're done for now. */
	if (status)
		return 0;

	/* Reset the decoder state but preserve timing. */
	time = decoder->time;
	tcal = decoder->tcal;

	pt_qry_reset(decoder);

	decoder->time = time;
	if (decoder->config.flags.variant.query.keep_tcal_on_ovf) {
		status = pt_tcal_update_ovf(&tcal, &decoder->config);
		if (status < 0)
			return status;

		decoder->tcal = tcal;
	}

	/* We must consume the OVF before we search for the binding packet. */
	decoder->pos += ptps_ovf;

	/* Overflow binds to either FUP or TIP.PGE.
	 *
	 * If the overflow can be resolved while PacketEn=1 it binds to FUP. We
	 * can see timing packets between OVF and FUP but that's it.
	 *
	 * Otherwise, PacketEn will be zero when the overflow resolves and OVF
	 * binds to TIP.PGE. There can be packets between OVF and TIP.PGE that
	 * do not depend on PacketEn.
	 *
	 * We don't need to decode everything until TIP.PGE, however. As soon
	 * as we see a non-timing non-FUP packet, we know that tracing has been
	 * disabled before the overflow resolves.
	 */
	offset = pt_qry_find_ovf_fup(decoder);
	if (offset <= 0) {
		/* Check for erratum SKD010.
		 *
		 * The FUP may have been dropped. If we can figure out that
		 * tracing is enabled and hence the FUP is missing, we resume
		 * at a later packet and a different IP.
		 */
		if (decoder->config.errata.skd010) {
			status = pt_qry_handle_skd010(decoder);
			if (status <= 0)
				return status;
		}

		/* Check for erratum APL11.
		 *
		 * We may have gotten an extra TIP.PGD, which should be
		 * diagnosed by our search for a subsequent FUP.
		 */
		if (decoder->config.errata.apl11 &&
		    (offset == -pte_bad_context)) {
			status = pt_qry_handle_apl11(decoder);
			if (status <= 0)
				return status;
		}

		/* Report the original error from searching for the FUP packet
		 * if we were not able to fix the trace.
		 *
		 * We treat an overflow at the end of the trace as standalone.
		 */
		if (offset < 0 && offset != -pte_eos)
			return offset;

		return pt_qry_event_ovf_disabled(decoder);
	} else {
		/* Check for erratum APL12.
		 *
		 * We may get an extra FUP even though the overflow resolved
		 * with tracing disabled.
		 */
		if (decoder->config.errata.apl12) {
			status = pt_qry_handle_apl12(decoder,
						     (unsigned int) offset);
			if (status <= 0)
				return status;
		}

		return pt_qry_event_ovf_enabled(decoder);
	}
}
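
/* Two sketched packet sequences to illustrate the two branches above:
 *
 *   OVF, MTC, CYC, FUP(ip)   only timing packets precede the FUP; the
 *                            overflow resolved while tracing was enabled and
 *                            we report an overflow event at @ip.
 *
 *   OVF, PIP, TIP.PGE(ip)    the PIP does not depend on PacketEn, so tracing
 *                            must have been disabled; we report a stand-alone
 *                            overflow event without searching further.
 */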

static int pt_qry_decode_mode_exec(struct pt_query_decoder *decoder,
				   const struct pt_packet_mode_exec *packet)
{
	struct pt_event *event;

	if (!decoder || !packet)
		return -pte_internal;

	/* MODE.EXEC binds to TIP. */
	event = pt_evq_enqueue(&decoder->evq, evb_tip);
	if (!event)
		return -pte_nomem;

	event->type = ptev_exec_mode;
	event->variant.exec_mode.mode = pt_get_exec_mode(packet);

	return pt_qry_event_time(event, decoder);
}

static int pt_qry_decode_mode_tsx(struct pt_query_decoder *decoder,
				  const struct pt_packet_mode_tsx *packet)
{
	struct pt_event *event;

	if (!decoder || !packet)
		return -pte_internal;

	/* MODE.TSX is standalone if tracing is disabled. */
	if (!decoder->enabled) {
		event = pt_evq_standalone(&decoder->evq);
		if (!event)
			return -pte_internal;

		/* We don't have an IP in this case. */
		event->variant.tsx.ip = 0;
		event->ip_suppressed = 1;

		/* Publish the event. */
		decoder->event = event;
	} else {
		/* MODE.TSX binds to FUP. */
		event = pt_evq_enqueue(&decoder->evq, evb_fup);
		if (!event)
			return -pte_nomem;
	}

	event->type = ptev_tsx;
	event->variant.tsx.speculative = packet->intx;
	event->variant.tsx.aborted = packet->abrt;

	return pt_qry_event_time(event, decoder);
}
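
/* To summarize the MODE.TSX binding above:
 *
 *   tracing disabled:  MODE.TSX is stand-alone; the event is published
 *                      immediately with the IP suppressed.
 *
 *   tracing enabled:   MODE.TSX binds to the next FUP; the event is queued
 *                      on evb_fup and reported at the FUP IP.
 */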

int pt_qry_decode_mode(struct pt_query_decoder *decoder)
{
	struct pt_packet_mode packet;
	int size, errcode;

	if (!decoder)
		return -pte_internal;

	size = pt_pkt_read_mode(&packet, decoder->pos, &decoder->config);
	if (size < 0)
		return size;

	errcode = 0;
	switch (packet.leaf) {
	case pt_mol_exec:
		errcode = pt_qry_decode_mode_exec(decoder, &packet.bits.exec);
		break;

	case pt_mol_tsx:
		errcode = pt_qry_decode_mode_tsx(decoder, &packet.bits.tsx);
		break;
	}

	if (errcode < 0)
		return errcode;

	decoder->pos += size;
	return 0;
}

int pt_qry_header_mode(struct pt_query_decoder *decoder)
{
	struct pt_packet_mode packet;
	struct pt_event *event;
	int size;

	if (!decoder)
		return -pte_internal;

	size = pt_pkt_read_mode(&packet, decoder->pos, &decoder->config);
	if (size < 0)
		return size;

	/* Inside the header, events are reported at the end. */
	event = pt_evq_enqueue(&decoder->evq, evb_psbend);
	if (!event)
		return -pte_nomem;

	switch (packet.leaf) {
	case pt_mol_exec:
		event->type = ptev_exec_mode;
		event->variant.exec_mode.mode =
			pt_get_exec_mode(&packet.bits.exec);
		break;

	case pt_mol_tsx:
		event->type = ptev_tsx;
		event->variant.tsx.speculative = packet.bits.tsx.intx;
		event->variant.tsx.aborted = packet.bits.tsx.abrt;
		break;
	}

	decoder->pos += size;
	return 0;
}

int pt_qry_decode_psbend(struct pt_query_decoder *decoder)
{
	int status;

	if (!decoder)
		return -pte_internal;

	status = pt_qry_process_pending_psb_events(decoder);
	if (status < 0)
		return status;

	/* If we had any psb events, we're done for now. */
	if (status)
		return 0;

	/* Skip the psbend extended opcode that we fetched before if no more
	 * psbend events are pending.
	 */
	decoder->pos += ptps_psbend;
	return 0;
}

int pt_qry_decode_tsc(struct pt_query_decoder *decoder)
{
	struct pt_packet_tsc packet;
	int size, errcode;

	if (!decoder)
		return -pte_internal;

	size = pt_pkt_read_tsc(&packet, decoder->pos, &decoder->config);
	if (size < 0)
		return size;

	errcode = pt_qry_apply_tsc(&decoder->time, &decoder->tcal,
				   &packet, &decoder->config);
	if (errcode < 0)
		return errcode;

	decoder->pos += size;
	return 0;
}

int pt_qry_header_tsc(struct pt_query_decoder *decoder)
{
	struct pt_packet_tsc packet;
	int size, errcode;

	if (!decoder)
		return -pte_internal;

	size = pt_pkt_read_tsc(&packet, decoder->pos, &decoder->config);
	if (size < 0)
		return size;

	errcode = pt_qry_apply_header_tsc(&decoder->time, &decoder->tcal,
					  &packet, &decoder->config);
	if (errcode < 0)
		return errcode;

	decoder->pos += size;
	return 0;
}

int pt_qry_decode_cbr(struct pt_query_decoder *decoder)
{
	struct pt_packet_cbr packet;
	struct pt_event *event;
	int size, errcode;

	if (!decoder)
		return -pte_internal;

	size = pt_pkt_read_cbr(&packet, decoder->pos, &decoder->config);
	if (size < 0)
		return size;

	errcode = pt_qry_apply_cbr(&decoder->time, &decoder->tcal,
				   &packet, &decoder->config);
	if (errcode < 0)
		return errcode;

	event = pt_evq_standalone(&decoder->evq);
	if (!event)
		return -pte_internal;

	event->type = ptev_cbr;
	event->variant.cbr.ratio = packet.ratio;

	decoder->event = event;

	errcode = pt_qry_event_time(event, decoder);
	if (errcode < 0)
		return errcode;

	decoder->pos += size;
	return 0;
}

int pt_qry_header_cbr(struct pt_query_decoder *decoder)
{
	struct pt_packet_cbr packet;
	struct pt_event *event;
	int size, errcode;

	if (!decoder)
		return -pte_internal;

	size = pt_pkt_read_cbr(&packet, decoder->pos, &decoder->config);
	if (size < 0)
		return size;

	errcode = pt_qry_apply_header_cbr(&decoder->time, &decoder->tcal,
					  &packet, &decoder->config);
	if (errcode < 0)
		return errcode;

	event = pt_evq_enqueue(&decoder->evq, evb_psbend);
	if (!event)
		return -pte_nomem;

	event->type = ptev_cbr;
	event->variant.cbr.ratio = packet.ratio;

	decoder->pos += size;
	return 0;
}

int pt_qry_decode_tma(struct pt_query_decoder *decoder)
{
	struct pt_packet_tma packet;
	int size, errcode;

	if (!decoder)
		return -pte_internal;

	size = pt_pkt_read_tma(&packet, decoder->pos, &decoder->config);
	if (size < 0)
		return size;

	errcode = pt_qry_apply_tma(&decoder->time, &decoder->tcal,
				   &packet, &decoder->config);
	if (errcode < 0)
		return errcode;

	decoder->pos += size;
	return 0;
}

int pt_qry_decode_mtc(struct pt_query_decoder *decoder)
{
	struct pt_packet_mtc packet;
	int size, errcode;

	if (!decoder)
		return -pte_internal;

	size = pt_pkt_read_mtc(&packet, decoder->pos, &decoder->config);
	if (size < 0)
		return size;

	errcode = pt_qry_apply_mtc(&decoder->time, &decoder->tcal,
				   &packet, &decoder->config);
	if (errcode < 0)
		return errcode;

	decoder->pos += size;
	return 0;
}

static int check_erratum_skd007(struct pt_query_decoder *decoder,
				const struct pt_packet_cyc *packet, int size)
{
	const uint8_t *pos;
	uint16_t payload;

	if (!decoder || !packet || size < 0)
		return -pte_internal;

	/* It must be a 2-byte CYC. */
	if (size != 2)
		return 0;

	payload = (uint16_t) packet->value;

	/* The 2nd byte of the CYC payload must look like an ext opcode. */
	if ((payload & ~0x1f) != 0x20)
		return 0;

	/* Skip this CYC packet. */
	pos = decoder->pos + size;
	if (decoder->config.end <= pos)
		return 0;

	/* See if we got a second CYC that looks like an OVF ext opcode. */
	if (*pos != pt_ext_ovf)
		return 0;

	/* We shouldn't get back-to-back CYCs unless they are sent when the
	 * counter wraps around. In this case, we'd expect a full payload.
	 *
	 * Since we got two non-full CYC packets, we assume the erratum hit.
	 */

	return 1;
}
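
/* A sketch of the byte pattern the check above is looking for, based on the
 * CYC encoding (first byte: payload[4:0] in bits 7:3, the Exp bit in bit 2,
 * the CYC opcode in bits 1:0; extension bytes: payload in bits 7:1, the Exp
 * bit in bit 0):
 *
 * A 2-byte CYC with a payload in [0x20; 0x3f] has a second byte of 0x02,
 * which is also the extended opcode prefix (pt_opc_ext). If the byte after
 * the CYC is pt_ext_ovf, the tail of the CYC plus that byte read as an OVF
 * packet:
 *
 *   cyc[0], 0x02, pt_ext_ovf
 *
 * Skipping cyc[0] in pt_qry_decode_cyc() below thus re-interprets the stream
 * as an OVF at the erratum location.
 */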

int pt_qry_decode_cyc(struct pt_query_decoder *decoder)
{
	struct pt_packet_cyc packet;
	struct pt_config *config;
	int size, errcode;

	if (!decoder)
		return -pte_internal;

	config = &decoder->config;

	size = pt_pkt_read_cyc(&packet, decoder->pos, config);
	if (size < 0)
		return size;

	if (config->errata.skd007) {
		errcode = check_erratum_skd007(decoder, &packet, size);
		if (errcode < 0)
			return errcode;

		/* If the erratum hits, we ignore the partial CYC and instead
		 * process the OVF following/overlapping it.
		 */
		if (errcode) {
			/* We skip the first byte of the CYC, which brings us
			 * to the beginning of the OVF packet.
			 */
			decoder->pos += 1;
			return 0;
		}
	}

	errcode = pt_qry_apply_cyc(&decoder->time, &decoder->tcal,
				   &packet, config);
	if (errcode < 0)
		return errcode;

	decoder->pos += size;
	return 0;
}

int pt_qry_decode_stop(struct pt_query_decoder *decoder)
{
	struct pt_event *event;
	int errcode;

	if (!decoder)
		return -pte_internal;

	/* Stop events are reported immediately. */
	event = pt_evq_standalone(&decoder->evq);
	if (!event)
		return -pte_internal;

	event->type = ptev_stop;

	decoder->event = event;

	errcode = pt_qry_event_time(event, decoder);
	if (errcode < 0)
		return errcode;

	decoder->pos += ptps_stop;
	return 0;
}

int pt_qry_header_vmcs(struct pt_query_decoder *decoder)
{
	struct pt_packet_vmcs packet;
	struct pt_event *event;
	int size;

	if (!decoder)
		return -pte_internal;

	size = pt_pkt_read_vmcs(&packet, decoder->pos, &decoder->config);
	if (size < 0)
		return size;

	event = pt_evq_enqueue(&decoder->evq, evb_psbend);
	if (!event)
		return -pte_nomem;

	event->type = ptev_async_vmcs;
	event->variant.async_vmcs.base = packet.base;

	decoder->pos += size;
	return 0;
}

int pt_qry_decode_vmcs(struct pt_query_decoder *decoder)
{
	struct pt_packet_vmcs packet;
	struct pt_event *event;
	int size, errcode;

	if (!decoder)
		return -pte_internal;

	size = pt_pkt_read_vmcs(&packet, decoder->pos, &decoder->config);
	if (size < 0)
		return size;

	/* VMCS events bind to the same IP as an in-flight async paging event.
	 *
	 * In that case, the VMCS event should be applied first. We reorder
	 * events here to simplify the life of higher layers.
	 */
	event = pt_evq_find(&decoder->evq, evb_tip, ptev_async_paging);
	if (event) {
		struct pt_event *paging;

		paging = pt_evq_enqueue(&decoder->evq, evb_tip);
		if (!paging)
			return -pte_nomem;

		*paging = *event;

		event->type = ptev_async_vmcs;
		event->variant.async_vmcs.base = packet.base;

		decoder->pos += size;
		return 0;
	}

	/* VMCS events bind to the same TIP packet as an in-flight async
	 * branch event.
	 */
	event = pt_evq_find(&decoder->evq, evb_tip, ptev_async_branch);
	if (event) {
		event = pt_evq_enqueue(&decoder->evq, evb_tip);
		if (!event)
			return -pte_nomem;

		event->type = ptev_async_vmcs;
		event->variant.async_vmcs.base = packet.base;

		decoder->pos += size;
		return 0;
	}

	/* VMCS events that do not bind to an in-flight async event are
	 * stand-alone.
	 */
	event = pt_evq_standalone(&decoder->evq);
	if (!event)
		return -pte_internal;

	event->type = ptev_vmcs;
	event->variant.vmcs.base = packet.base;

	decoder->event = event;

	errcode = pt_qry_event_time(event, decoder);
	if (errcode < 0)
		return errcode;

	decoder->pos += size;
	return 0;
}
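
/* The reordering above, illustrated on the evb_tip event queue (a sketch):
 *
 *   before:  [ async_paging ]
 *   after:   [ async_vmcs, async_paging ]
 *
 * The in-flight paging event is copied into a newly enqueued slot and the
 * original slot is turned into the VMCS event, so both still bind to the
 * same TIP packet but the VMCS event is reported first.
 */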

int pt_qry_decode_mnt(struct pt_query_decoder *decoder)
{
	struct pt_packet_mnt packet;
	struct pt_event *event;
	int size, errcode;

	if (!decoder)
		return -pte_internal;

	size = pt_pkt_read_mnt(&packet, decoder->pos, &decoder->config);
	if (size < 0)
		return size;

	event = pt_evq_standalone(&decoder->evq);
	if (!event)
		return -pte_internal;

	event->type = ptev_mnt;
	event->variant.mnt.payload = packet.payload;

	decoder->event = event;

	errcode = pt_qry_event_time(event, decoder);
	if (errcode < 0)
		return errcode;

	decoder->pos += size;

	return 0;
}

int pt_qry_header_mnt(struct pt_query_decoder *decoder)
{
	struct pt_packet_mnt packet;
	struct pt_event *event;
	int size;

	if (!decoder)
		return -pte_internal;

	size = pt_pkt_read_mnt(&packet, decoder->pos, &decoder->config);
	if (size < 0)
		return size;

	event = pt_evq_enqueue(&decoder->evq, evb_psbend);
	if (!event)
		return -pte_nomem;

	event->type = ptev_mnt;
	event->variant.mnt.payload = packet.payload;

	decoder->pos += size;

	return 0;
}

int pt_qry_decode_exstop(struct pt_query_decoder *decoder)
{
	struct pt_packet_exstop packet;
	struct pt_event *event;
	int size;

	if (!decoder)
		return -pte_internal;

	size = pt_pkt_read_exstop(&packet, decoder->pos, &decoder->config);
	if (size < 0)
		return size;

	if (packet.ip) {
		event = pt_evq_enqueue(&decoder->evq, evb_fup);
		if (!event)
			return -pte_internal;

		event->type = ptev_exstop;
	} else {
		event = pt_evq_standalone(&decoder->evq);
		if (!event)
			return -pte_internal;

		event->type = ptev_exstop;

		event->ip_suppressed = 1;
		event->variant.exstop.ip = 0ull;

		decoder->event = event;
	}

	decoder->pos += size;
	return 0;
}

int pt_qry_decode_mwait(struct pt_query_decoder *decoder)
{
	struct pt_packet_mwait packet;
	struct pt_event *event;
	int size;

	if (!decoder)
		return -pte_internal;

	size = pt_pkt_read_mwait(&packet, decoder->pos, &decoder->config);
	if (size < 0)
		return size;

	event = pt_evq_enqueue(&decoder->evq, evb_fup);
	if (!event)
		return -pte_internal;

	event->type = ptev_mwait;
	event->variant.mwait.hints = packet.hints;
	event->variant.mwait.ext = packet.ext;

	decoder->pos += size;
	return 0;
}

int pt_qry_decode_pwre(struct pt_query_decoder *decoder)
{
	struct pt_packet_pwre packet;
	struct pt_event *event;
	int size;

	if (!decoder)
		return -pte_internal;

	size = pt_pkt_read_pwre(&packet, decoder->pos, &decoder->config);
	if (size < 0)
		return size;

	event = pt_evq_standalone(&decoder->evq);
	if (!event)
		return -pte_internal;

	event->type = ptev_pwre;
	event->variant.pwre.state = packet.state;
	event->variant.pwre.sub_state = packet.sub_state;

	if (packet.hw)
		event->variant.pwre.hw = 1;

	decoder->event = event;

	decoder->pos += size;
	return 0;
}

int pt_qry_decode_pwrx(struct pt_query_decoder *decoder)
{
	struct pt_packet_pwrx packet;
	struct pt_event *event;
	int size;

	if (!decoder)
		return -pte_internal;

	size = pt_pkt_read_pwrx(&packet, decoder->pos, &decoder->config);
	if (size < 0)
		return size;

	event = pt_evq_standalone(&decoder->evq);
	if (!event)
		return -pte_internal;

	event->type = ptev_pwrx;
	event->variant.pwrx.last = packet.last;
	event->variant.pwrx.deepest = packet.deepest;

	if (packet.interrupt)
		event->variant.pwrx.interrupt = 1;
	if (packet.store)
		event->variant.pwrx.store = 1;
	if (packet.autonomous)
		event->variant.pwrx.autonomous = 1;

	decoder->event = event;

	decoder->pos += size;
	return 0;
}

int pt_qry_decode_ptw(struct pt_query_decoder *decoder)
{
	struct pt_packet_ptw packet;
	struct pt_event *event;
	int size, pls;

	if (!decoder)
		return -pte_internal;

	size = pt_pkt_read_ptw(&packet, decoder->pos, &decoder->config);
	if (size < 0)
		return size;

	pls = pt_ptw_size(packet.plc);
	if (pls < 0)
		return pls;

	if (packet.ip) {
		event = pt_evq_enqueue(&decoder->evq, evb_fup);
		if (!event)
			return -pte_internal;
	} else {
		event = pt_evq_standalone(&decoder->evq);
		if (!event)
			return -pte_internal;

		event->ip_suppressed = 1;

		decoder->event = event;
	}

	event->type = ptev_ptwrite;
	event->variant.ptwrite.size = (uint8_t) pls;
	event->variant.ptwrite.payload = packet.payload;

	decoder->pos += size;
	return 0;
}
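
/* A minimal sketch of how the decode functions in this file are driven via
 * the public query decoder API declared in intel-pt.h (error handling
 * shortened; @config is assumed to be a fully initialized pt_config):
 *
 *   struct pt_query_decoder *qry;
 *   struct pt_event event;
 *   uint64_t ip;
 *   int status;
 *
 *   qry = pt_qry_alloc_decoder(&config);
 *   if (!qry)
 *           return -pte_nomem;
 *
 *   status = pt_qry_sync_forward(qry, &ip);
 *   while (status >= 0) {
 *           if (status & pts_event_pending) {
 *                   status = pt_qry_event(qry, &event, sizeof(event));
 *                   if (status >= 0)
 *                           ... process @event ...
 *                   continue;
 *           }
 *
 *           ... use pt_qry_cond_branch() or pt_qry_indirect_branch() to
 *           ... follow the control flow.
 *   }
 *
 *   pt_qry_free_decoder(qry);
 */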