/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright (c) 2011, Joyent, Inc. All rights reserved.
 */

#include <stdlib.h>
#include <strings.h>
#include <errno.h>
#include <unistd.h>
#include <limits.h>
#include <assert.h>
#include <ctype.h>
#include <alloca.h>
#include <dt_impl.h>

#define	DT_MASK_LO 0x00000000FFFFFFFFULL

/*
 * We declare this here because (1) we need it and (2) we want to avoid a
 * dependency on libm in libdtrace.
 */
static long double
dt_fabsl(long double x)
{
	if (x < 0)
		return (-x);

	return (x);
}

/*
 * 128-bit arithmetic functions needed to support the stddev() aggregating
 * action.
 */
static int
dt_gt_128(uint64_t *a, uint64_t *b)
{
	return (a[1] > b[1] || (a[1] == b[1] && a[0] > b[0]));
}

static int
dt_ge_128(uint64_t *a, uint64_t *b)
{
	return (a[1] > b[1] || (a[1] == b[1] && a[0] >= b[0]));
}

static int
dt_le_128(uint64_t *a, uint64_t *b)
{
	return (a[1] < b[1] || (a[1] == b[1] && a[0] <= b[0]));
}

/*
 * Shift the 128-bit value in a by b. If b is positive, shift left.
 * If b is negative, shift right.
 */
static void
dt_shift_128(uint64_t *a, int b)
{
	uint64_t mask;

	if (b == 0)
		return;

	if (b < 0) {
		b = -b;
		if (b >= 64) {
			a[0] = a[1] >> (b - 64);
			a[1] = 0;
		} else {
			a[0] >>= b;
			mask = 1LL << (64 - b);
			mask -= 1;
			a[0] |= ((a[1] & mask) << (64 - b));
			a[1] >>= b;
		}
	} else {
		if (b >= 64) {
			a[1] = a[0] << (b - 64);
			a[0] = 0;
		} else {
			a[1] <<= b;
			mask = a[0] >> (64 - b);
			a[1] |= mask;
			a[0] <<= b;
		}
	}
}

static int
dt_nbits_128(uint64_t *a)
{
	int nbits = 0;
	uint64_t tmp[2];
	uint64_t zero[2] = { 0, 0 };

	tmp[0] = a[0];
	tmp[1] = a[1];

	dt_shift_128(tmp, -1);
	while (dt_gt_128(tmp, zero)) {
		dt_shift_128(tmp, -1);
		nbits++;
	}

	return (nbits);
}

static void
dt_subtract_128(uint64_t *minuend, uint64_t *subtrahend, uint64_t *difference)
{
	uint64_t result[2];

	result[0] = minuend[0] - subtrahend[0];
	result[1] = minuend[1] - subtrahend[1] -
	    (minuend[0] < subtrahend[0] ? 1 : 0);

	difference[0] = result[0];
	difference[1] = result[1];
}

static void
dt_add_128(uint64_t *addend1, uint64_t *addend2, uint64_t *sum)
{
	uint64_t result[2];

	result[0] = addend1[0] + addend2[0];
	result[1] = addend1[1] + addend2[1] +
	    (result[0] < addend1[0] || result[0] < addend2[0] ? 1 : 0);

	sum[0] = result[0];
	sum[1] = result[1];
}

/*
 * The basic idea is to break the 2 64-bit values into 4 32-bit values,
 * use native multiplication on those, and then re-combine into the
 * resulting 128-bit value.
 *
 * (hi1 << 32 + lo1) * (hi2 << 32 + lo2) =
 *     hi1 * hi2 << 64 +
 *     hi1 * lo2 << 32 +
 *     hi2 * lo1 << 32 +
 *     lo1 * lo2
 */
static void
dt_multiply_128(uint64_t factor1, uint64_t factor2, uint64_t *product)
{
	uint64_t hi1, hi2, lo1, lo2;
	uint64_t tmp[2];

	hi1 = factor1 >> 32;
	hi2 = factor2 >> 32;

	lo1 = factor1 & DT_MASK_LO;
	lo2 = factor2 & DT_MASK_LO;

	product[0] = lo1 * lo2;
	product[1] = hi1 * hi2;

	tmp[0] = hi1 * lo2;
	tmp[1] = 0;
	dt_shift_128(tmp, 32);
	dt_add_128(product, tmp, product);

	tmp[0] = hi2 * lo1;
	tmp[1] = 0;
	dt_shift_128(tmp, 32);
	dt_add_128(product, tmp, product);
}

/*
 * This is long-hand division.
 *
 * We initialize subtrahend by shifting divisor left as far as possible. We
 * loop, comparing subtrahend to dividend: if subtrahend is smaller, we
 * subtract and set the appropriate bit in the result. We then shift
 * subtrahend right by one bit for the next comparison.
 */
static void
dt_divide_128(uint64_t *dividend, uint64_t divisor, uint64_t *quotient)
{
	uint64_t result[2] = { 0, 0 };
	uint64_t remainder[2];
	uint64_t subtrahend[2];
	uint64_t divisor_128[2];
	uint64_t mask[2] = { 1, 0 };
	int log = 0;

	assert(divisor != 0);

	divisor_128[0] = divisor;
	divisor_128[1] = 0;

	remainder[0] = dividend[0];
	remainder[1] = dividend[1];

	subtrahend[0] = divisor;
	subtrahend[1] = 0;

	while (divisor > 0) {
		log++;
		divisor >>= 1;
	}

	dt_shift_128(subtrahend, 128 - log);
	dt_shift_128(mask, 128 - log);

	while (dt_ge_128(remainder, divisor_128)) {
		if (dt_ge_128(remainder, subtrahend)) {
			dt_subtract_128(remainder, subtrahend, remainder);
			result[0] |= mask[0];
			result[1] |= mask[1];
		}

		dt_shift_128(subtrahend, -1);
		dt_shift_128(mask, -1);
	}

	quotient[0] = result[0];
	quotient[1] = result[1];
}

/*
 * This is the long-hand method of calculating a square root.
 * The algorithm is as follows:
 *
 * 1. Group the digits by 2 from the right.
 * 2. Over the leftmost group, find the largest single-digit number
 *    whose square is less than that group.
 * 3. Subtract the result of the previous step (2 or 4, depending) and
 *    bring down the next two-digit group.
 * 4. For the result R we have so far, find the largest single-digit number
 *    x such that 2 * R * 10 * x + x^2 is less than the result from step 3.
 *    (Note that this is doubling R and performing a decimal left-shift by 1
 *    and searching for the appropriate decimal to fill the one's place.)
 *    The value x is the next digit in the square root.
 * Repeat steps 3 and 4 until the desired precision is reached. (We're
 * dealing with integers, so the above is sufficient.)
 *
 * In decimal, the square root of 582,734 would be calculated as so:
 *
 *      __7__6__3
 *     | 58 27 34
 *      -49        (7^2 == 49 => 7 is the first digit in the square root)
 *       --
 *        9 27     (Subtract and bring down the next group.)
 * 146    8 76     (2 * 7 * 10 * 6 + 6^2 == 876 => 6 is the next digit in
 *      -----       the square root)
 *       51 34     (Subtract and bring down the next group.)
 * 1523  45 69     (2 * 76 * 10 * 3 + 3^2 == 4569 => 3 is the next digit in
 *      -----       the square root)
 *        5 65     (remainder)
 *
 * The above algorithm applies similarly in binary, but note that the
 * only possible non-zero value for x in step 4 is 1, so step 4 becomes a
 * simple decision: is 2 * R * 2 * 1 + 1^2 (aka R << 2 + 1) less than the
 * preceding difference?
 *
 * In binary, the square root of 11011011 would be calculated as so:
 *
 *     __1__1__1__0
 *    | 11 01 10 11
 *      01           (0 << 2 + 1 == 1 < 11 => this bit is 1)
 *      --
 *      10 01 10 11
 * 101   1 01        (1 << 2 + 1 == 101 < 1001 => next bit is 1)
 *    -----
 *       1 00 10 11
 * 1101    11 01     (11 << 2 + 1 == 1101 < 10010 => next bit is 1)
 *    -------
 *          1 01 11
 * 11101      1 11 01 (111 << 2 + 1 == 11101 > 10111 => last bit is 0)
 *
 */
static uint64_t
dt_sqrt_128(uint64_t *square)
{
	uint64_t result[2] = { 0, 0 };
	uint64_t diff[2] = { 0, 0 };
	uint64_t one[2] = { 1, 0 };
	uint64_t next_pair[2];
	uint64_t next_try[2];
	uint64_t bit_pairs, pair_shift;
	int i;

	bit_pairs = dt_nbits_128(square) / 2;
	pair_shift = bit_pairs * 2;

	for (i = 0; i <= bit_pairs; i++) {
		/*
		 * Bring down the next pair of bits.
		 */
		next_pair[0] = square[0];
		next_pair[1] = square[1];
		dt_shift_128(next_pair, -pair_shift);
		next_pair[0] &= 0x3;
		next_pair[1] = 0;

		dt_shift_128(diff, 2);
		dt_add_128(diff, next_pair, diff);

		/*
		 * next_try = R << 2 + 1
		 */
		next_try[0] = result[0];
		next_try[1] = result[1];
		dt_shift_128(next_try, 2);
		dt_add_128(next_try, one, next_try);

		if (dt_le_128(next_try, diff)) {
			dt_subtract_128(diff, next_try, diff);
			dt_shift_128(result, 1);
			dt_add_128(result, one, result);
		} else {
			dt_shift_128(result, 1);
		}

		pair_shift -= 2;
	}

	assert(result[1] == 0);

	return (result[0]);
}

uint64_t
dt_stddev(uint64_t *data, uint64_t normal)
{
	uint64_t avg_of_squares[2];
	uint64_t square_of_avg[2];
	int64_t norm_avg;
	uint64_t diff[2];

	/*
	 * The standard approximation for standard deviation is
	 * sqrt(average(x**2) - average(x)**2), i.e. the square root
	 * of the average of the squares minus the square of the average.
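	 *
	 * (Worked example, added for illustration: for the eight samples
	 * 2, 4, 4, 4, 5, 5, 7 and 9 with normal == 1, data[0] holds the
	 * count 8, data[1] the sum 40, and data[2..3] the 128-bit sum of
	 * squares, 232. The average of the squares is 232 / 8 == 29, the
	 * average is 40 / 8 == 5 and its square is 25, so the value
	 * returned below is sqrt(29 - 25) == 2.)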
	 */
	dt_divide_128(data + 2, normal, avg_of_squares);
	dt_divide_128(avg_of_squares, data[0], avg_of_squares);

	norm_avg = (int64_t)data[1] / (int64_t)normal / (int64_t)data[0];

	if (norm_avg < 0)
		norm_avg = -norm_avg;

	dt_multiply_128((uint64_t)norm_avg, (uint64_t)norm_avg, square_of_avg);

	dt_subtract_128(avg_of_squares, square_of_avg, diff);

	return (dt_sqrt_128(diff));
}

static int
dt_flowindent(dtrace_hdl_t *dtp, dtrace_probedata_t *data, dtrace_epid_t last,
    dtrace_bufdesc_t *buf, size_t offs)
{
	dtrace_probedesc_t *pd = data->dtpda_pdesc, *npd;
	dtrace_eprobedesc_t *epd = data->dtpda_edesc, *nepd;
	char *p = pd->dtpd_provider, *n = pd->dtpd_name, *sub;
	dtrace_flowkind_t flow = DTRACEFLOW_NONE;
	const char *str = NULL;
	static const char *e_str[2] = { " -> ", " => " };
	static const char *r_str[2] = { " <- ", " <= " };
	static const char *ent = "entry", *ret = "return";
	static int entlen = 0, retlen = 0;
	dtrace_epid_t next, id = epd->dtepd_epid;
	int rval;

	if (entlen == 0) {
		assert(retlen == 0);
		entlen = strlen(ent);
		retlen = strlen(ret);
	}

	/*
	 * If the name of the probe is "entry" or ends with "-entry", we
	 * treat it as an entry; if it is "return" or ends with "-return",
	 * we treat it as a return. (This allows application-provided probes
	 * like "method-entry" or "function-entry" to participate in flow
	 * indentation -- without accidentally misinterpreting popular probe
	 * names like "carpentry", "gentry" or "Coventry".)
	 */
	if ((sub = strstr(n, ent)) != NULL && sub[entlen] == '\0' &&
	    (sub == n || sub[-1] == '-')) {
		flow = DTRACEFLOW_ENTRY;
		str = e_str[strcmp(p, "syscall") == 0];
	} else if ((sub = strstr(n, ret)) != NULL && sub[retlen] == '\0' &&
	    (sub == n || sub[-1] == '-')) {
		flow = DTRACEFLOW_RETURN;
		str = r_str[strcmp(p, "syscall") == 0];
	}

	/*
	 * If we're going to indent this, we need to check the ID of our last
	 * call. If we're looking at the same probe ID but a different EPID,
	 * we _don't_ want to indent. (Yes, there are some minor holes in
	 * this scheme -- it's a heuristic.)
	 */
	if (flow == DTRACEFLOW_ENTRY) {
		if ((last != DTRACE_EPIDNONE && id != last &&
		    pd->dtpd_id == dtp->dt_pdesc[last]->dtpd_id))
			flow = DTRACEFLOW_NONE;
	}

	/*
	 * If we're going to unindent this, it's more difficult to see if
	 * we don't actually want to unindent it -- we need to look at the
	 * _next_ EPID.
	 */
	if (flow == DTRACEFLOW_RETURN) {
		offs += epd->dtepd_size;

		do {
			if (offs >= buf->dtbd_size) {
				/*
				 * We're at the end -- maybe. If the oldest
				 * record is non-zero, we need to wrap.
444 */ 445 if (buf->dtbd_oldest != 0) { 446 offs = 0; 447 } else { 448 goto out; 449 } 450 } 451 452 next = *(uint32_t *)((uintptr_t)buf->dtbd_data + offs); 453 454 if (next == DTRACE_EPIDNONE) 455 offs += sizeof (id); 456 } while (next == DTRACE_EPIDNONE); 457 458 if ((rval = dt_epid_lookup(dtp, next, &nepd, &npd)) != 0) 459 return (rval); 460 461 if (next != id && npd->dtpd_id == pd->dtpd_id) 462 flow = DTRACEFLOW_NONE; 463 } 464 465 out: 466 if (flow == DTRACEFLOW_ENTRY || flow == DTRACEFLOW_RETURN) { 467 data->dtpda_prefix = str; 468 } else { 469 data->dtpda_prefix = "| "; 470 } 471 472 if (flow == DTRACEFLOW_RETURN && data->dtpda_indent > 0) 473 data->dtpda_indent -= 2; 474 475 data->dtpda_flow = flow; 476 477 return (0); 478 } 479 480 static int 481 dt_nullprobe() 482 { 483 return (DTRACE_CONSUME_THIS); 484 } 485 486 static int 487 dt_nullrec() 488 { 489 return (DTRACE_CONSUME_NEXT); 490 } 491 492 int 493 dt_print_quantline(dtrace_hdl_t *dtp, FILE *fp, int64_t val, 494 uint64_t normal, long double total, char positives, char negatives) 495 { 496 long double f; 497 uint_t depth, len = 40; 498 499 const char *ats = "@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@"; 500 const char *spaces = " "; 501 502 assert(strlen(ats) == len && strlen(spaces) == len); 503 assert(!(total == 0 && (positives || negatives))); 504 assert(!(val < 0 && !negatives)); 505 assert(!(val > 0 && !positives)); 506 assert(!(val != 0 && total == 0)); 507 508 if (!negatives) { 509 if (positives) { 510 f = (dt_fabsl((long double)val) * len) / total; 511 depth = (uint_t)(f + 0.5); 512 } else { 513 depth = 0; 514 } 515 516 return (dt_printf(dtp, fp, "|%s%s %-9lld\n", ats + len - depth, 517 spaces + depth, (long long)val / normal)); 518 } 519 520 if (!positives) { 521 f = (dt_fabsl((long double)val) * len) / total; 522 depth = (uint_t)(f + 0.5); 523 524 return (dt_printf(dtp, fp, "%s%s| %-9lld\n", spaces + depth, 525 ats + len - depth, (long long)val / normal)); 526 } 527 528 /* 529 * If we're here, we have both positive and negative bucket values. 530 * To express this graphically, we're going to generate both positive 531 * and negative bars separated by a centerline. These bars are half 532 * the size of normal quantize()/lquantize() bars, so we divide the 533 * length in half before calculating the bar length. 534 */ 535 len /= 2; 536 ats = &ats[len]; 537 spaces = &spaces[len]; 538 539 f = (dt_fabsl((long double)val) * len) / total; 540 depth = (uint_t)(f + 0.5); 541 542 if (val <= 0) { 543 return (dt_printf(dtp, fp, "%s%s|%*s %-9lld\n", spaces + depth, 544 ats + len - depth, len, "", (long long)val / normal)); 545 } else { 546 return (dt_printf(dtp, fp, "%20s|%s%s %-9lld\n", "", 547 ats + len - depth, spaces + depth, 548 (long long)val / normal)); 549 } 550 } 551 552 int 553 dt_print_quantize(dtrace_hdl_t *dtp, FILE *fp, const void *addr, 554 size_t size, uint64_t normal) 555 { 556 const int64_t *data = addr; 557 int i, first_bin = 0, last_bin = DTRACE_QUANTIZE_NBUCKETS - 1; 558 long double total = 0; 559 char positives = 0, negatives = 0; 560 561 if (size != DTRACE_QUANTIZE_NBUCKETS * sizeof (uint64_t)) 562 return (dt_set_errno(dtp, EDT_DMISMATCH)); 563 564 while (first_bin < DTRACE_QUANTIZE_NBUCKETS - 1 && data[first_bin] == 0) 565 first_bin++; 566 567 if (first_bin == DTRACE_QUANTIZE_NBUCKETS - 1) { 568 /* 569 * There isn't any data. This is possible if (and only if) 570 * negative increment values have been used. In this case, 571 * we'll print the buckets around 0. 
572 */ 573 first_bin = DTRACE_QUANTIZE_ZEROBUCKET - 1; 574 last_bin = DTRACE_QUANTIZE_ZEROBUCKET + 1; 575 } else { 576 if (first_bin > 0) 577 first_bin--; 578 579 while (last_bin > 0 && data[last_bin] == 0) 580 last_bin--; 581 582 if (last_bin < DTRACE_QUANTIZE_NBUCKETS - 1) 583 last_bin++; 584 } 585 586 for (i = first_bin; i <= last_bin; i++) { 587 positives |= (data[i] > 0); 588 negatives |= (data[i] < 0); 589 total += dt_fabsl((long double)data[i]); 590 } 591 592 if (dt_printf(dtp, fp, "\n%16s %41s %-9s\n", "value", 593 "------------- Distribution -------------", "count") < 0) 594 return (-1); 595 596 for (i = first_bin; i <= last_bin; i++) { 597 if (dt_printf(dtp, fp, "%16lld ", 598 (long long)DTRACE_QUANTIZE_BUCKETVAL(i)) < 0) 599 return (-1); 600 601 if (dt_print_quantline(dtp, fp, data[i], normal, total, 602 positives, negatives) < 0) 603 return (-1); 604 } 605 606 return (0); 607 } 608 609 int 610 dt_print_lquantize(dtrace_hdl_t *dtp, FILE *fp, const void *addr, 611 size_t size, uint64_t normal) 612 { 613 const int64_t *data = addr; 614 int i, first_bin, last_bin, base; 615 uint64_t arg; 616 long double total = 0; 617 uint16_t step, levels; 618 char positives = 0, negatives = 0; 619 620 if (size < sizeof (uint64_t)) 621 return (dt_set_errno(dtp, EDT_DMISMATCH)); 622 623 arg = *data++; 624 size -= sizeof (uint64_t); 625 626 base = DTRACE_LQUANTIZE_BASE(arg); 627 step = DTRACE_LQUANTIZE_STEP(arg); 628 levels = DTRACE_LQUANTIZE_LEVELS(arg); 629 630 first_bin = 0; 631 last_bin = levels + 1; 632 633 if (size != sizeof (uint64_t) * (levels + 2)) 634 return (dt_set_errno(dtp, EDT_DMISMATCH)); 635 636 while (first_bin <= levels + 1 && data[first_bin] == 0) 637 first_bin++; 638 639 if (first_bin > levels + 1) { 640 first_bin = 0; 641 last_bin = 2; 642 } else { 643 if (first_bin > 0) 644 first_bin--; 645 646 while (last_bin > 0 && data[last_bin] == 0) 647 last_bin--; 648 649 if (last_bin < levels + 1) 650 last_bin++; 651 } 652 653 for (i = first_bin; i <= last_bin; i++) { 654 positives |= (data[i] > 0); 655 negatives |= (data[i] < 0); 656 total += dt_fabsl((long double)data[i]); 657 } 658 659 if (dt_printf(dtp, fp, "\n%16s %41s %-9s\n", "value", 660 "------------- Distribution -------------", "count") < 0) 661 return (-1); 662 663 for (i = first_bin; i <= last_bin; i++) { 664 char c[32]; 665 int err; 666 667 if (i == 0) { 668 (void) snprintf(c, sizeof (c), "< %d", 669 base / (uint32_t)normal); 670 err = dt_printf(dtp, fp, "%16s ", c); 671 } else if (i == levels + 1) { 672 (void) snprintf(c, sizeof (c), ">= %d", 673 base + (levels * step)); 674 err = dt_printf(dtp, fp, "%16s ", c); 675 } else { 676 err = dt_printf(dtp, fp, "%16d ", 677 base + (i - 1) * step); 678 } 679 680 if (err < 0 || dt_print_quantline(dtp, fp, data[i], normal, 681 total, positives, negatives) < 0) 682 return (-1); 683 } 684 685 return (0); 686 } 687 688 int 689 dt_print_llquantize(dtrace_hdl_t *dtp, FILE *fp, const void *addr, 690 size_t size, uint64_t normal) 691 { 692 int i, first_bin, last_bin, bin = 1, order, levels; 693 uint16_t factor, low, high, nsteps; 694 const int64_t *data = addr; 695 int64_t value = 1, next, step; 696 char positives = 0, negatives = 0; 697 long double total = 0; 698 uint64_t arg; 699 char c[32]; 700 701 if (size < sizeof (uint64_t)) 702 return (dt_set_errno(dtp, EDT_DMISMATCH)); 703 704 arg = *data++; 705 size -= sizeof (uint64_t); 706 707 factor = DTRACE_LLQUANTIZE_FACTOR(arg); 708 low = DTRACE_LLQUANTIZE_LOW(arg); 709 high = DTRACE_LLQUANTIZE_HIGH(arg); 710 nsteps = 
DTRACE_LLQUANTIZE_NSTEP(arg); 711 712 /* 713 * We don't expect to be handed invalid llquantize() parameters here, 714 * but sanity check them (to a degree) nonetheless. 715 */ 716 if (size > INT32_MAX || factor < 2 || low >= high || 717 nsteps == 0 || factor > nsteps) 718 return (dt_set_errno(dtp, EDT_DMISMATCH)); 719 720 levels = (int)size / sizeof (uint64_t); 721 722 first_bin = 0; 723 last_bin = levels - 1; 724 725 while (first_bin < levels && data[first_bin] == 0) 726 first_bin++; 727 728 if (first_bin == levels) { 729 first_bin = 0; 730 last_bin = 1; 731 } else { 732 if (first_bin > 0) 733 first_bin--; 734 735 while (last_bin > 0 && data[last_bin] == 0) 736 last_bin--; 737 738 if (last_bin < levels - 1) 739 last_bin++; 740 } 741 742 for (i = first_bin; i <= last_bin; i++) { 743 positives |= (data[i] > 0); 744 negatives |= (data[i] < 0); 745 total += dt_fabsl((long double)data[i]); 746 } 747 748 if (dt_printf(dtp, fp, "\n%16s %41s %-9s\n", "value", 749 "------------- Distribution -------------", "count") < 0) 750 return (-1); 751 752 for (order = 0; order < low; order++) 753 value *= factor; 754 755 next = value * factor; 756 step = next > nsteps ? next / nsteps : 1; 757 758 if (first_bin == 0) { 759 (void) snprintf(c, sizeof (c), "< %lld", value); 760 761 if (dt_printf(dtp, fp, "%16s ", c) < 0) 762 return (-1); 763 764 if (dt_print_quantline(dtp, fp, data[0], normal, 765 total, positives, negatives) < 0) 766 return (-1); 767 } 768 769 while (order <= high) { 770 if (bin >= first_bin && bin <= last_bin) { 771 if (dt_printf(dtp, fp, "%16lld ", (long long)value) < 0) 772 return (-1); 773 774 if (dt_print_quantline(dtp, fp, data[bin], 775 normal, total, positives, negatives) < 0) 776 return (-1); 777 } 778 779 assert(value < next); 780 bin++; 781 782 if ((value += step) != next) 783 continue; 784 785 next = value * factor; 786 step = next > nsteps ? next / nsteps : 1; 787 order++; 788 } 789 790 if (last_bin < bin) 791 return (0); 792 793 assert(last_bin == bin); 794 (void) snprintf(c, sizeof (c), ">= %lld", value); 795 796 if (dt_printf(dtp, fp, "%16s ", c) < 0) 797 return (-1); 798 799 return (dt_print_quantline(dtp, fp, data[bin], normal, 800 total, positives, negatives)); 801 } 802 803 /*ARGSUSED*/ 804 static int 805 dt_print_average(dtrace_hdl_t *dtp, FILE *fp, caddr_t addr, 806 size_t size, uint64_t normal) 807 { 808 /* LINTED - alignment */ 809 int64_t *data = (int64_t *)addr; 810 811 return (dt_printf(dtp, fp, " %16lld", data[0] ? 812 (long long)(data[1] / (int64_t)normal / data[0]) : 0)); 813 } 814 815 /*ARGSUSED*/ 816 static int 817 dt_print_stddev(dtrace_hdl_t *dtp, FILE *fp, caddr_t addr, 818 size_t size, uint64_t normal) 819 { 820 /* LINTED - alignment */ 821 uint64_t *data = (uint64_t *)addr; 822 823 return (dt_printf(dtp, fp, " %16llu", data[0] ? 824 (unsigned long long) dt_stddev(data, normal) : 0)); 825 } 826 827 /*ARGSUSED*/ 828 int 829 dt_print_bytes(dtrace_hdl_t *dtp, FILE *fp, caddr_t addr, 830 size_t nbytes, int width, int quiet, int forceraw) 831 { 832 /* 833 * If the byte stream is a series of printable characters, followed by 834 * a terminating byte, we print it out as a string. Otherwise, we 835 * assume that it's something else and just print the bytes. 
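	 *
	 * (For illustration: the three bytes { 'h', 'i', '\0' } pass the
	 * checks below and are emitted as the string "hi", while a payload
	 * like { 0xde, 0xad, 0xbe, 0xef } fails the printability test on
	 * its first byte and falls through to the hex/ASCII dump at the
	 * "raw" label.)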
836 */ 837 int i, j, margin = 5; 838 char *c = (char *)addr; 839 840 if (nbytes == 0) 841 return (0); 842 843 if (forceraw) 844 goto raw; 845 846 if (dtp->dt_options[DTRACEOPT_RAWBYTES] != DTRACEOPT_UNSET) 847 goto raw; 848 849 for (i = 0; i < nbytes; i++) { 850 /* 851 * We define a "printable character" to be one for which 852 * isprint(3C) returns non-zero, isspace(3C) returns non-zero, 853 * or a character which is either backspace or the bell. 854 * Backspace and the bell are regrettably special because 855 * they fail the first two tests -- and yet they are entirely 856 * printable. These are the only two control characters that 857 * have meaning for the terminal and for which isprint(3C) and 858 * isspace(3C) return 0. 859 */ 860 if (isprint(c[i]) || isspace(c[i]) || 861 c[i] == '\b' || c[i] == '\a') 862 continue; 863 864 if (c[i] == '\0' && i > 0) { 865 /* 866 * This looks like it might be a string. Before we 867 * assume that it is indeed a string, check the 868 * remainder of the byte range; if it contains 869 * additional non-nul characters, we'll assume that 870 * it's a binary stream that just happens to look like 871 * a string, and we'll print out the individual bytes. 872 */ 873 for (j = i + 1; j < nbytes; j++) { 874 if (c[j] != '\0') 875 break; 876 } 877 878 if (j != nbytes) 879 break; 880 881 if (quiet) 882 return (dt_printf(dtp, fp, "%s", c)); 883 else 884 return (dt_printf(dtp, fp, " %-*s", width, c)); 885 } 886 887 break; 888 } 889 890 if (i == nbytes) { 891 /* 892 * The byte range is all printable characters, but there is 893 * no trailing nul byte. We'll assume that it's a string and 894 * print it as such. 895 */ 896 char *s = alloca(nbytes + 1); 897 bcopy(c, s, nbytes); 898 s[nbytes] = '\0'; 899 return (dt_printf(dtp, fp, " %-*s", width, s)); 900 } 901 902 raw: 903 if (dt_printf(dtp, fp, "\n%*s ", margin, "") < 0) 904 return (-1); 905 906 for (i = 0; i < 16; i++) 907 if (dt_printf(dtp, fp, " %c", "0123456789abcdef"[i]) < 0) 908 return (-1); 909 910 if (dt_printf(dtp, fp, " 0123456789abcdef\n") < 0) 911 return (-1); 912 913 914 for (i = 0; i < nbytes; i += 16) { 915 if (dt_printf(dtp, fp, "%*s%5x:", margin, "", i) < 0) 916 return (-1); 917 918 for (j = i; j < i + 16 && j < nbytes; j++) { 919 if (dt_printf(dtp, fp, " %02x", (uchar_t)c[j]) < 0) 920 return (-1); 921 } 922 923 while (j++ % 16) { 924 if (dt_printf(dtp, fp, " ") < 0) 925 return (-1); 926 } 927 928 if (dt_printf(dtp, fp, " ") < 0) 929 return (-1); 930 931 for (j = i; j < i + 16 && j < nbytes; j++) { 932 if (dt_printf(dtp, fp, "%c", 933 c[j] < ' ' || c[j] > '~' ? '.' 
: c[j]) < 0) 934 return (-1); 935 } 936 937 if (dt_printf(dtp, fp, "\n") < 0) 938 return (-1); 939 } 940 941 return (0); 942 } 943 944 int 945 dt_print_stack(dtrace_hdl_t *dtp, FILE *fp, const char *format, 946 caddr_t addr, int depth, int size) 947 { 948 dtrace_syminfo_t dts; 949 GElf_Sym sym; 950 int i, indent; 951 char c[PATH_MAX * 2]; 952 uint64_t pc; 953 954 if (dt_printf(dtp, fp, "\n") < 0) 955 return (-1); 956 957 if (format == NULL) 958 format = "%s"; 959 960 if (dtp->dt_options[DTRACEOPT_STACKINDENT] != DTRACEOPT_UNSET) 961 indent = (int)dtp->dt_options[DTRACEOPT_STACKINDENT]; 962 else 963 indent = _dtrace_stkindent; 964 965 for (i = 0; i < depth; i++) { 966 switch (size) { 967 case sizeof (uint32_t): 968 /* LINTED - alignment */ 969 pc = *((uint32_t *)addr); 970 break; 971 972 case sizeof (uint64_t): 973 /* LINTED - alignment */ 974 pc = *((uint64_t *)addr); 975 break; 976 977 default: 978 return (dt_set_errno(dtp, EDT_BADSTACKPC)); 979 } 980 981 if (pc == NULL) 982 break; 983 984 addr += size; 985 986 if (dt_printf(dtp, fp, "%*s", indent, "") < 0) 987 return (-1); 988 989 if (dtrace_lookup_by_addr(dtp, pc, &sym, &dts) == 0) { 990 if (pc > sym.st_value) { 991 (void) snprintf(c, sizeof (c), "%s`%s+0x%llx", 992 dts.dts_object, dts.dts_name, 993 pc - sym.st_value); 994 } else { 995 (void) snprintf(c, sizeof (c), "%s`%s", 996 dts.dts_object, dts.dts_name); 997 } 998 } else { 999 /* 1000 * We'll repeat the lookup, but this time we'll specify 1001 * a NULL GElf_Sym -- indicating that we're only 1002 * interested in the containing module. 1003 */ 1004 if (dtrace_lookup_by_addr(dtp, pc, NULL, &dts) == 0) { 1005 (void) snprintf(c, sizeof (c), "%s`0x%llx", 1006 dts.dts_object, pc); 1007 } else { 1008 (void) snprintf(c, sizeof (c), "0x%llx", pc); 1009 } 1010 } 1011 1012 if (dt_printf(dtp, fp, format, c) < 0) 1013 return (-1); 1014 1015 if (dt_printf(dtp, fp, "\n") < 0) 1016 return (-1); 1017 } 1018 1019 return (0); 1020 } 1021 1022 int 1023 dt_print_ustack(dtrace_hdl_t *dtp, FILE *fp, const char *format, 1024 caddr_t addr, uint64_t arg) 1025 { 1026 /* LINTED - alignment */ 1027 uint64_t *pc = (uint64_t *)addr; 1028 uint32_t depth = DTRACE_USTACK_NFRAMES(arg); 1029 uint32_t strsize = DTRACE_USTACK_STRSIZE(arg); 1030 const char *strbase = addr + (depth + 1) * sizeof (uint64_t); 1031 const char *str = strsize ? strbase : NULL; 1032 int err = 0; 1033 1034 char name[PATH_MAX], objname[PATH_MAX], c[PATH_MAX * 2]; 1035 struct ps_prochandle *P; 1036 GElf_Sym sym; 1037 int i, indent; 1038 pid_t pid; 1039 1040 if (depth == 0) 1041 return (0); 1042 1043 pid = (pid_t)*pc++; 1044 1045 if (dt_printf(dtp, fp, "\n") < 0) 1046 return (-1); 1047 1048 if (format == NULL) 1049 format = "%s"; 1050 1051 if (dtp->dt_options[DTRACEOPT_STACKINDENT] != DTRACEOPT_UNSET) 1052 indent = (int)dtp->dt_options[DTRACEOPT_STACKINDENT]; 1053 else 1054 indent = _dtrace_stkindent; 1055 1056 /* 1057 * Ultimately, we need to add an entry point in the library vector for 1058 * determining <symbol, offset> from <pid, address>. For now, if 1059 * this is a vector open, we just print the raw address or string. 
1060 */ 1061 if (dtp->dt_vector == NULL) 1062 P = dt_proc_grab(dtp, pid, PGRAB_RDONLY | PGRAB_FORCE, 0); 1063 else 1064 P = NULL; 1065 1066 if (P != NULL) 1067 dt_proc_lock(dtp, P); /* lock handle while we perform lookups */ 1068 1069 for (i = 0; i < depth && pc[i] != NULL; i++) { 1070 const prmap_t *map; 1071 1072 if ((err = dt_printf(dtp, fp, "%*s", indent, "")) < 0) 1073 break; 1074 1075 if (P != NULL && Plookup_by_addr(P, pc[i], 1076 name, sizeof (name), &sym) == 0) { 1077 (void) Pobjname(P, pc[i], objname, sizeof (objname)); 1078 1079 if (pc[i] > sym.st_value) { 1080 (void) snprintf(c, sizeof (c), 1081 "%s`%s+0x%llx", dt_basename(objname), name, 1082 (u_longlong_t)(pc[i] - sym.st_value)); 1083 } else { 1084 (void) snprintf(c, sizeof (c), 1085 "%s`%s", dt_basename(objname), name); 1086 } 1087 } else if (str != NULL && str[0] != '\0' && str[0] != '@' && 1088 (P != NULL && ((map = Paddr_to_map(P, pc[i])) == NULL || 1089 (map->pr_mflags & MA_WRITE)))) { 1090 /* 1091 * If the current string pointer in the string table 1092 * does not point to an empty string _and_ the program 1093 * counter falls in a writable region, we'll use the 1094 * string from the string table instead of the raw 1095 * address. This last condition is necessary because 1096 * some (broken) ustack helpers will return a string 1097 * even for a program counter that they can't 1098 * identify. If we have a string for a program 1099 * counter that falls in a segment that isn't 1100 * writable, we assume that we have fallen into this 1101 * case and we refuse to use the string. 1102 */ 1103 (void) snprintf(c, sizeof (c), "%s", str); 1104 } else { 1105 if (P != NULL && Pobjname(P, pc[i], objname, 1106 sizeof (objname)) != NULL) { 1107 (void) snprintf(c, sizeof (c), "%s`0x%llx", 1108 dt_basename(objname), (u_longlong_t)pc[i]); 1109 } else { 1110 (void) snprintf(c, sizeof (c), "0x%llx", 1111 (u_longlong_t)pc[i]); 1112 } 1113 } 1114 1115 if ((err = dt_printf(dtp, fp, format, c)) < 0) 1116 break; 1117 1118 if ((err = dt_printf(dtp, fp, "\n")) < 0) 1119 break; 1120 1121 if (str != NULL && str[0] == '@') { 1122 /* 1123 * If the first character of the string is an "at" sign, 1124 * then the string is inferred to be an annotation -- 1125 * and it is printed out beneath the frame and offset 1126 * with brackets. 
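			 *
			 * (For example, a helper-supplied annotation string
			 * of "@expansion frame" would be printed on its own
			 * line as " [ expansion frame ]".)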
1127 */ 1128 if ((err = dt_printf(dtp, fp, "%*s", indent, "")) < 0) 1129 break; 1130 1131 (void) snprintf(c, sizeof (c), " [ %s ]", &str[1]); 1132 1133 if ((err = dt_printf(dtp, fp, format, c)) < 0) 1134 break; 1135 1136 if ((err = dt_printf(dtp, fp, "\n")) < 0) 1137 break; 1138 } 1139 1140 if (str != NULL) { 1141 str += strlen(str) + 1; 1142 if (str - strbase >= strsize) 1143 str = NULL; 1144 } 1145 } 1146 1147 if (P != NULL) { 1148 dt_proc_unlock(dtp, P); 1149 dt_proc_release(dtp, P); 1150 } 1151 1152 return (err); 1153 } 1154 1155 static int 1156 dt_print_usym(dtrace_hdl_t *dtp, FILE *fp, caddr_t addr, dtrace_actkind_t act) 1157 { 1158 /* LINTED - alignment */ 1159 uint64_t pid = ((uint64_t *)addr)[0]; 1160 /* LINTED - alignment */ 1161 uint64_t pc = ((uint64_t *)addr)[1]; 1162 const char *format = " %-50s"; 1163 char *s; 1164 int n, len = 256; 1165 1166 if (act == DTRACEACT_USYM && dtp->dt_vector == NULL) { 1167 struct ps_prochandle *P; 1168 1169 if ((P = dt_proc_grab(dtp, pid, 1170 PGRAB_RDONLY | PGRAB_FORCE, 0)) != NULL) { 1171 GElf_Sym sym; 1172 1173 dt_proc_lock(dtp, P); 1174 1175 if (Plookup_by_addr(P, pc, NULL, 0, &sym) == 0) 1176 pc = sym.st_value; 1177 1178 dt_proc_unlock(dtp, P); 1179 dt_proc_release(dtp, P); 1180 } 1181 } 1182 1183 do { 1184 n = len; 1185 s = alloca(n); 1186 } while ((len = dtrace_uaddr2str(dtp, pid, pc, s, n)) > n); 1187 1188 return (dt_printf(dtp, fp, format, s)); 1189 } 1190 1191 int 1192 dt_print_umod(dtrace_hdl_t *dtp, FILE *fp, const char *format, caddr_t addr) 1193 { 1194 /* LINTED - alignment */ 1195 uint64_t pid = ((uint64_t *)addr)[0]; 1196 /* LINTED - alignment */ 1197 uint64_t pc = ((uint64_t *)addr)[1]; 1198 int err = 0; 1199 1200 char objname[PATH_MAX], c[PATH_MAX * 2]; 1201 struct ps_prochandle *P; 1202 1203 if (format == NULL) 1204 format = " %-50s"; 1205 1206 /* 1207 * See the comment in dt_print_ustack() for the rationale for 1208 * printing raw addresses in the vectored case. 1209 */ 1210 if (dtp->dt_vector == NULL) 1211 P = dt_proc_grab(dtp, pid, PGRAB_RDONLY | PGRAB_FORCE, 0); 1212 else 1213 P = NULL; 1214 1215 if (P != NULL) 1216 dt_proc_lock(dtp, P); /* lock handle while we perform lookups */ 1217 1218 if (P != NULL && Pobjname(P, pc, objname, sizeof (objname)) != NULL) { 1219 (void) snprintf(c, sizeof (c), "%s", dt_basename(objname)); 1220 } else { 1221 (void) snprintf(c, sizeof (c), "0x%llx", (u_longlong_t)pc); 1222 } 1223 1224 err = dt_printf(dtp, fp, format, c); 1225 1226 if (P != NULL) { 1227 dt_proc_unlock(dtp, P); 1228 dt_proc_release(dtp, P); 1229 } 1230 1231 return (err); 1232 } 1233 1234 static int 1235 dt_print_sym(dtrace_hdl_t *dtp, FILE *fp, const char *format, caddr_t addr) 1236 { 1237 /* LINTED - alignment */ 1238 uint64_t pc = *((uint64_t *)addr); 1239 dtrace_syminfo_t dts; 1240 GElf_Sym sym; 1241 char c[PATH_MAX * 2]; 1242 1243 if (format == NULL) 1244 format = " %-50s"; 1245 1246 if (dtrace_lookup_by_addr(dtp, pc, &sym, &dts) == 0) { 1247 (void) snprintf(c, sizeof (c), "%s`%s", 1248 dts.dts_object, dts.dts_name); 1249 } else { 1250 /* 1251 * We'll repeat the lookup, but this time we'll specify a 1252 * NULL GElf_Sym -- indicating that we're only interested in 1253 * the containing module. 
1254 */ 1255 if (dtrace_lookup_by_addr(dtp, pc, NULL, &dts) == 0) { 1256 (void) snprintf(c, sizeof (c), "%s`0x%llx", 1257 dts.dts_object, (u_longlong_t)pc); 1258 } else { 1259 (void) snprintf(c, sizeof (c), "0x%llx", 1260 (u_longlong_t)pc); 1261 } 1262 } 1263 1264 if (dt_printf(dtp, fp, format, c) < 0) 1265 return (-1); 1266 1267 return (0); 1268 } 1269 1270 int 1271 dt_print_mod(dtrace_hdl_t *dtp, FILE *fp, const char *format, caddr_t addr) 1272 { 1273 /* LINTED - alignment */ 1274 uint64_t pc = *((uint64_t *)addr); 1275 dtrace_syminfo_t dts; 1276 char c[PATH_MAX * 2]; 1277 1278 if (format == NULL) 1279 format = " %-50s"; 1280 1281 if (dtrace_lookup_by_addr(dtp, pc, NULL, &dts) == 0) { 1282 (void) snprintf(c, sizeof (c), "%s", dts.dts_object); 1283 } else { 1284 (void) snprintf(c, sizeof (c), "0x%llx", (u_longlong_t)pc); 1285 } 1286 1287 if (dt_printf(dtp, fp, format, c) < 0) 1288 return (-1); 1289 1290 return (0); 1291 } 1292 1293 typedef struct dt_normal { 1294 dtrace_aggvarid_t dtnd_id; 1295 uint64_t dtnd_normal; 1296 } dt_normal_t; 1297 1298 static int 1299 dt_normalize_agg(const dtrace_aggdata_t *aggdata, void *arg) 1300 { 1301 dt_normal_t *normal = arg; 1302 dtrace_aggdesc_t *agg = aggdata->dtada_desc; 1303 dtrace_aggvarid_t id = normal->dtnd_id; 1304 1305 if (agg->dtagd_nrecs == 0) 1306 return (DTRACE_AGGWALK_NEXT); 1307 1308 if (agg->dtagd_varid != id) 1309 return (DTRACE_AGGWALK_NEXT); 1310 1311 ((dtrace_aggdata_t *)aggdata)->dtada_normal = normal->dtnd_normal; 1312 return (DTRACE_AGGWALK_NORMALIZE); 1313 } 1314 1315 static int 1316 dt_normalize(dtrace_hdl_t *dtp, caddr_t base, dtrace_recdesc_t *rec) 1317 { 1318 dt_normal_t normal; 1319 caddr_t addr; 1320 1321 /* 1322 * We (should) have two records: the aggregation ID followed by the 1323 * normalization value. 
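	 *
	 * (For example, the D action normalize(@bytes, 1024) lays down the
	 * aggregation variable ID for @bytes in the first record and the
	 * value 1024 in the LIBACT record that follows; the switch below
	 * accepts that value at any integral width.)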
1324 */ 1325 addr = base + rec->dtrd_offset; 1326 1327 if (rec->dtrd_size != sizeof (dtrace_aggvarid_t)) 1328 return (dt_set_errno(dtp, EDT_BADNORMAL)); 1329 1330 /* LINTED - alignment */ 1331 normal.dtnd_id = *((dtrace_aggvarid_t *)addr); 1332 rec++; 1333 1334 if (rec->dtrd_action != DTRACEACT_LIBACT) 1335 return (dt_set_errno(dtp, EDT_BADNORMAL)); 1336 1337 if (rec->dtrd_arg != DT_ACT_NORMALIZE) 1338 return (dt_set_errno(dtp, EDT_BADNORMAL)); 1339 1340 addr = base + rec->dtrd_offset; 1341 1342 switch (rec->dtrd_size) { 1343 case sizeof (uint64_t): 1344 /* LINTED - alignment */ 1345 normal.dtnd_normal = *((uint64_t *)addr); 1346 break; 1347 case sizeof (uint32_t): 1348 /* LINTED - alignment */ 1349 normal.dtnd_normal = *((uint32_t *)addr); 1350 break; 1351 case sizeof (uint16_t): 1352 /* LINTED - alignment */ 1353 normal.dtnd_normal = *((uint16_t *)addr); 1354 break; 1355 case sizeof (uint8_t): 1356 normal.dtnd_normal = *((uint8_t *)addr); 1357 break; 1358 default: 1359 return (dt_set_errno(dtp, EDT_BADNORMAL)); 1360 } 1361 1362 (void) dtrace_aggregate_walk(dtp, dt_normalize_agg, &normal); 1363 1364 return (0); 1365 } 1366 1367 static int 1368 dt_denormalize_agg(const dtrace_aggdata_t *aggdata, void *arg) 1369 { 1370 dtrace_aggdesc_t *agg = aggdata->dtada_desc; 1371 dtrace_aggvarid_t id = *((dtrace_aggvarid_t *)arg); 1372 1373 if (agg->dtagd_nrecs == 0) 1374 return (DTRACE_AGGWALK_NEXT); 1375 1376 if (agg->dtagd_varid != id) 1377 return (DTRACE_AGGWALK_NEXT); 1378 1379 return (DTRACE_AGGWALK_DENORMALIZE); 1380 } 1381 1382 static int 1383 dt_clear_agg(const dtrace_aggdata_t *aggdata, void *arg) 1384 { 1385 dtrace_aggdesc_t *agg = aggdata->dtada_desc; 1386 dtrace_aggvarid_t id = *((dtrace_aggvarid_t *)arg); 1387 1388 if (agg->dtagd_nrecs == 0) 1389 return (DTRACE_AGGWALK_NEXT); 1390 1391 if (agg->dtagd_varid != id) 1392 return (DTRACE_AGGWALK_NEXT); 1393 1394 return (DTRACE_AGGWALK_CLEAR); 1395 } 1396 1397 typedef struct dt_trunc { 1398 dtrace_aggvarid_t dttd_id; 1399 uint64_t dttd_remaining; 1400 } dt_trunc_t; 1401 1402 static int 1403 dt_trunc_agg(const dtrace_aggdata_t *aggdata, void *arg) 1404 { 1405 dt_trunc_t *trunc = arg; 1406 dtrace_aggdesc_t *agg = aggdata->dtada_desc; 1407 dtrace_aggvarid_t id = trunc->dttd_id; 1408 1409 if (agg->dtagd_nrecs == 0) 1410 return (DTRACE_AGGWALK_NEXT); 1411 1412 if (agg->dtagd_varid != id) 1413 return (DTRACE_AGGWALK_NEXT); 1414 1415 if (trunc->dttd_remaining == 0) 1416 return (DTRACE_AGGWALK_REMOVE); 1417 1418 trunc->dttd_remaining--; 1419 return (DTRACE_AGGWALK_NEXT); 1420 } 1421 1422 static int 1423 dt_trunc(dtrace_hdl_t *dtp, caddr_t base, dtrace_recdesc_t *rec) 1424 { 1425 dt_trunc_t trunc; 1426 caddr_t addr; 1427 int64_t remaining; 1428 int (*func)(dtrace_hdl_t *, dtrace_aggregate_f *, void *); 1429 1430 /* 1431 * We (should) have two records: the aggregation ID followed by the 1432 * number of aggregation entries after which the aggregation is to be 1433 * truncated. 
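	 *
	 * (For example, trunc(@a, 10) retains the ten highest-valued
	 * entries, while trunc(@a, -10) -- a negative count -- retains the
	 * ten lowest-valued entries by way of the value-sorted walk chosen
	 * below.)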
1434 */ 1435 addr = base + rec->dtrd_offset; 1436 1437 if (rec->dtrd_size != sizeof (dtrace_aggvarid_t)) 1438 return (dt_set_errno(dtp, EDT_BADTRUNC)); 1439 1440 /* LINTED - alignment */ 1441 trunc.dttd_id = *((dtrace_aggvarid_t *)addr); 1442 rec++; 1443 1444 if (rec->dtrd_action != DTRACEACT_LIBACT) 1445 return (dt_set_errno(dtp, EDT_BADTRUNC)); 1446 1447 if (rec->dtrd_arg != DT_ACT_TRUNC) 1448 return (dt_set_errno(dtp, EDT_BADTRUNC)); 1449 1450 addr = base + rec->dtrd_offset; 1451 1452 switch (rec->dtrd_size) { 1453 case sizeof (uint64_t): 1454 /* LINTED - alignment */ 1455 remaining = *((int64_t *)addr); 1456 break; 1457 case sizeof (uint32_t): 1458 /* LINTED - alignment */ 1459 remaining = *((int32_t *)addr); 1460 break; 1461 case sizeof (uint16_t): 1462 /* LINTED - alignment */ 1463 remaining = *((int16_t *)addr); 1464 break; 1465 case sizeof (uint8_t): 1466 remaining = *((int8_t *)addr); 1467 break; 1468 default: 1469 return (dt_set_errno(dtp, EDT_BADNORMAL)); 1470 } 1471 1472 if (remaining < 0) { 1473 func = dtrace_aggregate_walk_valsorted; 1474 remaining = -remaining; 1475 } else { 1476 func = dtrace_aggregate_walk_valrevsorted; 1477 } 1478 1479 assert(remaining >= 0); 1480 trunc.dttd_remaining = remaining; 1481 1482 (void) func(dtp, dt_trunc_agg, &trunc); 1483 1484 return (0); 1485 } 1486 1487 static int 1488 dt_print_datum(dtrace_hdl_t *dtp, FILE *fp, dtrace_recdesc_t *rec, 1489 caddr_t addr, size_t size, uint64_t normal) 1490 { 1491 int err; 1492 dtrace_actkind_t act = rec->dtrd_action; 1493 1494 switch (act) { 1495 case DTRACEACT_STACK: 1496 return (dt_print_stack(dtp, fp, NULL, addr, 1497 rec->dtrd_arg, rec->dtrd_size / rec->dtrd_arg)); 1498 1499 case DTRACEACT_USTACK: 1500 case DTRACEACT_JSTACK: 1501 return (dt_print_ustack(dtp, fp, NULL, addr, rec->dtrd_arg)); 1502 1503 case DTRACEACT_USYM: 1504 case DTRACEACT_UADDR: 1505 return (dt_print_usym(dtp, fp, addr, act)); 1506 1507 case DTRACEACT_UMOD: 1508 return (dt_print_umod(dtp, fp, NULL, addr)); 1509 1510 case DTRACEACT_SYM: 1511 return (dt_print_sym(dtp, fp, NULL, addr)); 1512 1513 case DTRACEACT_MOD: 1514 return (dt_print_mod(dtp, fp, NULL, addr)); 1515 1516 case DTRACEAGG_QUANTIZE: 1517 return (dt_print_quantize(dtp, fp, addr, size, normal)); 1518 1519 case DTRACEAGG_LQUANTIZE: 1520 return (dt_print_lquantize(dtp, fp, addr, size, normal)); 1521 1522 case DTRACEAGG_LLQUANTIZE: 1523 return (dt_print_llquantize(dtp, fp, addr, size, normal)); 1524 1525 case DTRACEAGG_AVG: 1526 return (dt_print_average(dtp, fp, addr, size, normal)); 1527 1528 case DTRACEAGG_STDDEV: 1529 return (dt_print_stddev(dtp, fp, addr, size, normal)); 1530 1531 default: 1532 break; 1533 } 1534 1535 switch (size) { 1536 case sizeof (uint64_t): 1537 err = dt_printf(dtp, fp, " %16lld", 1538 /* LINTED - alignment */ 1539 (long long)*((uint64_t *)addr) / normal); 1540 break; 1541 case sizeof (uint32_t): 1542 /* LINTED - alignment */ 1543 err = dt_printf(dtp, fp, " %8d", *((uint32_t *)addr) / 1544 (uint32_t)normal); 1545 break; 1546 case sizeof (uint16_t): 1547 /* LINTED - alignment */ 1548 err = dt_printf(dtp, fp, " %5d", *((uint16_t *)addr) / 1549 (uint32_t)normal); 1550 break; 1551 case sizeof (uint8_t): 1552 err = dt_printf(dtp, fp, " %3d", *((uint8_t *)addr) / 1553 (uint32_t)normal); 1554 break; 1555 default: 1556 err = dt_print_bytes(dtp, fp, addr, size, 50, 0, 0); 1557 break; 1558 } 1559 1560 return (err); 1561 } 1562 1563 int 1564 dt_print_aggs(const dtrace_aggdata_t **aggsdata, int naggvars, void *arg) 1565 { 1566 int i, aggact = 0; 1567 
dt_print_aggdata_t *pd = arg; 1568 const dtrace_aggdata_t *aggdata = aggsdata[0]; 1569 dtrace_aggdesc_t *agg = aggdata->dtada_desc; 1570 FILE *fp = pd->dtpa_fp; 1571 dtrace_hdl_t *dtp = pd->dtpa_dtp; 1572 dtrace_recdesc_t *rec; 1573 dtrace_actkind_t act; 1574 caddr_t addr; 1575 size_t size; 1576 1577 /* 1578 * Iterate over each record description in the key, printing the traced 1579 * data, skipping the first datum (the tuple member created by the 1580 * compiler). 1581 */ 1582 for (i = 1; i < agg->dtagd_nrecs; i++) { 1583 rec = &agg->dtagd_rec[i]; 1584 act = rec->dtrd_action; 1585 addr = aggdata->dtada_data + rec->dtrd_offset; 1586 size = rec->dtrd_size; 1587 1588 if (DTRACEACT_ISAGG(act)) { 1589 aggact = i; 1590 break; 1591 } 1592 1593 if (dt_print_datum(dtp, fp, rec, addr, size, 1) < 0) 1594 return (-1); 1595 1596 if (dt_buffered_flush(dtp, NULL, rec, aggdata, 1597 DTRACE_BUFDATA_AGGKEY) < 0) 1598 return (-1); 1599 } 1600 1601 assert(aggact != 0); 1602 1603 for (i = (naggvars == 1 ? 0 : 1); i < naggvars; i++) { 1604 uint64_t normal; 1605 1606 aggdata = aggsdata[i]; 1607 agg = aggdata->dtada_desc; 1608 rec = &agg->dtagd_rec[aggact]; 1609 act = rec->dtrd_action; 1610 addr = aggdata->dtada_data + rec->dtrd_offset; 1611 size = rec->dtrd_size; 1612 1613 assert(DTRACEACT_ISAGG(act)); 1614 normal = aggdata->dtada_normal; 1615 1616 if (dt_print_datum(dtp, fp, rec, addr, size, normal) < 0) 1617 return (-1); 1618 1619 if (dt_buffered_flush(dtp, NULL, rec, aggdata, 1620 DTRACE_BUFDATA_AGGVAL) < 0) 1621 return (-1); 1622 1623 if (!pd->dtpa_allunprint) 1624 agg->dtagd_flags |= DTRACE_AGD_PRINTED; 1625 } 1626 1627 if (dt_printf(dtp, fp, "\n") < 0) 1628 return (-1); 1629 1630 if (dt_buffered_flush(dtp, NULL, NULL, aggdata, 1631 DTRACE_BUFDATA_AGGFORMAT | DTRACE_BUFDATA_AGGLAST) < 0) 1632 return (-1); 1633 1634 return (0); 1635 } 1636 1637 int 1638 dt_print_agg(const dtrace_aggdata_t *aggdata, void *arg) 1639 { 1640 dt_print_aggdata_t *pd = arg; 1641 dtrace_aggdesc_t *agg = aggdata->dtada_desc; 1642 dtrace_aggvarid_t aggvarid = pd->dtpa_id; 1643 1644 if (pd->dtpa_allunprint) { 1645 if (agg->dtagd_flags & DTRACE_AGD_PRINTED) 1646 return (0); 1647 } else { 1648 /* 1649 * If we're not printing all unprinted aggregations, then the 1650 * aggregation variable ID denotes a specific aggregation 1651 * variable that we should print -- skip any other aggregations 1652 * that we encounter. 
1653 */ 1654 if (agg->dtagd_nrecs == 0) 1655 return (0); 1656 1657 if (aggvarid != agg->dtagd_varid) 1658 return (0); 1659 } 1660 1661 return (dt_print_aggs(&aggdata, 1, arg)); 1662 } 1663 1664 int 1665 dt_setopt(dtrace_hdl_t *dtp, const dtrace_probedata_t *data, 1666 const char *option, const char *value) 1667 { 1668 int len, rval; 1669 char *msg; 1670 const char *errstr; 1671 dtrace_setoptdata_t optdata; 1672 1673 bzero(&optdata, sizeof (optdata)); 1674 (void) dtrace_getopt(dtp, option, &optdata.dtsda_oldval); 1675 1676 if (dtrace_setopt(dtp, option, value) == 0) { 1677 (void) dtrace_getopt(dtp, option, &optdata.dtsda_newval); 1678 optdata.dtsda_probe = data; 1679 optdata.dtsda_option = option; 1680 optdata.dtsda_handle = dtp; 1681 1682 if ((rval = dt_handle_setopt(dtp, &optdata)) != 0) 1683 return (rval); 1684 1685 return (0); 1686 } 1687 1688 errstr = dtrace_errmsg(dtp, dtrace_errno(dtp)); 1689 len = strlen(option) + strlen(value) + strlen(errstr) + 80; 1690 msg = alloca(len); 1691 1692 (void) snprintf(msg, len, "couldn't set option \"%s\" to \"%s\": %s\n", 1693 option, value, errstr); 1694 1695 if ((rval = dt_handle_liberr(dtp, data, msg)) == 0) 1696 return (0); 1697 1698 return (rval); 1699 } 1700 1701 static int 1702 dt_consume_cpu(dtrace_hdl_t *dtp, FILE *fp, int cpu, dtrace_bufdesc_t *buf, 1703 dtrace_consume_probe_f *efunc, dtrace_consume_rec_f *rfunc, void *arg) 1704 { 1705 dtrace_epid_t id; 1706 size_t offs, start = buf->dtbd_oldest, end = buf->dtbd_size; 1707 int flow = (dtp->dt_options[DTRACEOPT_FLOWINDENT] != DTRACEOPT_UNSET); 1708 int quiet = (dtp->dt_options[DTRACEOPT_QUIET] != DTRACEOPT_UNSET); 1709 int rval, i, n; 1710 dtrace_epid_t last = DTRACE_EPIDNONE; 1711 uint64_t tracememsize = 0; 1712 dtrace_probedata_t data; 1713 uint64_t drops; 1714 caddr_t addr; 1715 1716 bzero(&data, sizeof (data)); 1717 data.dtpda_handle = dtp; 1718 data.dtpda_cpu = cpu; 1719 1720 again: 1721 for (offs = start; offs < end; ) { 1722 dtrace_eprobedesc_t *epd; 1723 1724 /* 1725 * We're guaranteed to have an ID. 1726 */ 1727 id = *(uint32_t *)((uintptr_t)buf->dtbd_data + offs); 1728 1729 if (id == DTRACE_EPIDNONE) { 1730 /* 1731 * This is filler to assure proper alignment of the 1732 * next record; we simply ignore it. 
1733 */ 1734 offs += sizeof (id); 1735 continue; 1736 } 1737 1738 if ((rval = dt_epid_lookup(dtp, id, &data.dtpda_edesc, 1739 &data.dtpda_pdesc)) != 0) 1740 return (rval); 1741 1742 epd = data.dtpda_edesc; 1743 data.dtpda_data = buf->dtbd_data + offs; 1744 1745 if (data.dtpda_edesc->dtepd_uarg != DT_ECB_DEFAULT) { 1746 rval = dt_handle(dtp, &data); 1747 1748 if (rval == DTRACE_CONSUME_NEXT) 1749 goto nextepid; 1750 1751 if (rval == DTRACE_CONSUME_ERROR) 1752 return (-1); 1753 } 1754 1755 if (flow) 1756 (void) dt_flowindent(dtp, &data, last, buf, offs); 1757 1758 rval = (*efunc)(&data, arg); 1759 1760 if (flow) { 1761 if (data.dtpda_flow == DTRACEFLOW_ENTRY) 1762 data.dtpda_indent += 2; 1763 } 1764 1765 if (rval == DTRACE_CONSUME_NEXT) 1766 goto nextepid; 1767 1768 if (rval == DTRACE_CONSUME_ABORT) 1769 return (dt_set_errno(dtp, EDT_DIRABORT)); 1770 1771 if (rval != DTRACE_CONSUME_THIS) 1772 return (dt_set_errno(dtp, EDT_BADRVAL)); 1773 1774 for (i = 0; i < epd->dtepd_nrecs; i++) { 1775 dtrace_recdesc_t *rec = &epd->dtepd_rec[i]; 1776 dtrace_actkind_t act = rec->dtrd_action; 1777 1778 data.dtpda_data = buf->dtbd_data + offs + 1779 rec->dtrd_offset; 1780 addr = data.dtpda_data; 1781 1782 if (act == DTRACEACT_LIBACT) { 1783 uint64_t arg = rec->dtrd_arg; 1784 dtrace_aggvarid_t id; 1785 1786 switch (arg) { 1787 case DT_ACT_CLEAR: 1788 /* LINTED - alignment */ 1789 id = *((dtrace_aggvarid_t *)addr); 1790 (void) dtrace_aggregate_walk(dtp, 1791 dt_clear_agg, &id); 1792 continue; 1793 1794 case DT_ACT_DENORMALIZE: 1795 /* LINTED - alignment */ 1796 id = *((dtrace_aggvarid_t *)addr); 1797 (void) dtrace_aggregate_walk(dtp, 1798 dt_denormalize_agg, &id); 1799 continue; 1800 1801 case DT_ACT_FTRUNCATE: 1802 if (fp == NULL) 1803 continue; 1804 1805 (void) fflush(fp); 1806 (void) ftruncate(fileno(fp), 0); 1807 (void) fseeko(fp, 0, SEEK_SET); 1808 continue; 1809 1810 case DT_ACT_NORMALIZE: 1811 if (i == epd->dtepd_nrecs - 1) 1812 return (dt_set_errno(dtp, 1813 EDT_BADNORMAL)); 1814 1815 if (dt_normalize(dtp, 1816 buf->dtbd_data + offs, rec) != 0) 1817 return (-1); 1818 1819 i++; 1820 continue; 1821 1822 case DT_ACT_SETOPT: { 1823 uint64_t *opts = dtp->dt_options; 1824 dtrace_recdesc_t *valrec; 1825 uint32_t valsize; 1826 caddr_t val; 1827 int rv; 1828 1829 if (i == epd->dtepd_nrecs - 1) { 1830 return (dt_set_errno(dtp, 1831 EDT_BADSETOPT)); 1832 } 1833 1834 valrec = &epd->dtepd_rec[++i]; 1835 valsize = valrec->dtrd_size; 1836 1837 if (valrec->dtrd_action != act || 1838 valrec->dtrd_arg != arg) { 1839 return (dt_set_errno(dtp, 1840 EDT_BADSETOPT)); 1841 } 1842 1843 if (valsize > sizeof (uint64_t)) { 1844 val = buf->dtbd_data + offs + 1845 valrec->dtrd_offset; 1846 } else { 1847 val = "1"; 1848 } 1849 1850 rv = dt_setopt(dtp, &data, addr, val); 1851 1852 if (rv != 0) 1853 return (-1); 1854 1855 flow = (opts[DTRACEOPT_FLOWINDENT] != 1856 DTRACEOPT_UNSET); 1857 quiet = (opts[DTRACEOPT_QUIET] != 1858 DTRACEOPT_UNSET); 1859 1860 continue; 1861 } 1862 1863 case DT_ACT_TRUNC: 1864 if (i == epd->dtepd_nrecs - 1) 1865 return (dt_set_errno(dtp, 1866 EDT_BADTRUNC)); 1867 1868 if (dt_trunc(dtp, 1869 buf->dtbd_data + offs, rec) != 0) 1870 return (-1); 1871 1872 i++; 1873 continue; 1874 1875 default: 1876 continue; 1877 } 1878 } 1879 1880 if (act == DTRACEACT_TRACEMEM_DYNSIZE && 1881 rec->dtrd_size == sizeof (uint64_t)) { 1882 /* LINTED - alignment */ 1883 tracememsize = *((unsigned long long *)addr); 1884 continue; 1885 } 1886 1887 rval = (*rfunc)(&data, rec, arg); 1888 1889 if (rval == DTRACE_CONSUME_NEXT) 1890 
continue; 1891 1892 if (rval == DTRACE_CONSUME_ABORT) 1893 return (dt_set_errno(dtp, EDT_DIRABORT)); 1894 1895 if (rval != DTRACE_CONSUME_THIS) 1896 return (dt_set_errno(dtp, EDT_BADRVAL)); 1897 1898 if (act == DTRACEACT_STACK) { 1899 int depth = rec->dtrd_arg; 1900 1901 if (dt_print_stack(dtp, fp, NULL, addr, depth, 1902 rec->dtrd_size / depth) < 0) 1903 return (-1); 1904 goto nextrec; 1905 } 1906 1907 if (act == DTRACEACT_USTACK || 1908 act == DTRACEACT_JSTACK) { 1909 if (dt_print_ustack(dtp, fp, NULL, 1910 addr, rec->dtrd_arg) < 0) 1911 return (-1); 1912 goto nextrec; 1913 } 1914 1915 if (act == DTRACEACT_SYM) { 1916 if (dt_print_sym(dtp, fp, NULL, addr) < 0) 1917 return (-1); 1918 goto nextrec; 1919 } 1920 1921 if (act == DTRACEACT_MOD) { 1922 if (dt_print_mod(dtp, fp, NULL, addr) < 0) 1923 return (-1); 1924 goto nextrec; 1925 } 1926 1927 if (act == DTRACEACT_USYM || act == DTRACEACT_UADDR) { 1928 if (dt_print_usym(dtp, fp, addr, act) < 0) 1929 return (-1); 1930 goto nextrec; 1931 } 1932 1933 if (act == DTRACEACT_UMOD) { 1934 if (dt_print_umod(dtp, fp, NULL, addr) < 0) 1935 return (-1); 1936 goto nextrec; 1937 } 1938 1939 if (DTRACEACT_ISPRINTFLIKE(act)) { 1940 void *fmtdata; 1941 int (*func)(dtrace_hdl_t *, FILE *, void *, 1942 const dtrace_probedata_t *, 1943 const dtrace_recdesc_t *, uint_t, 1944 const void *buf, size_t); 1945 1946 if ((fmtdata = dt_format_lookup(dtp, 1947 rec->dtrd_format)) == NULL) 1948 goto nofmt; 1949 1950 switch (act) { 1951 case DTRACEACT_PRINTF: 1952 func = dtrace_fprintf; 1953 break; 1954 case DTRACEACT_PRINTA: 1955 func = dtrace_fprinta; 1956 break; 1957 case DTRACEACT_SYSTEM: 1958 func = dtrace_system; 1959 break; 1960 case DTRACEACT_FREOPEN: 1961 func = dtrace_freopen; 1962 break; 1963 } 1964 1965 n = (*func)(dtp, fp, fmtdata, &data, 1966 rec, epd->dtepd_nrecs - i, 1967 (uchar_t *)buf->dtbd_data + offs, 1968 buf->dtbd_size - offs); 1969 1970 if (n < 0) 1971 return (-1); /* errno is set for us */ 1972 1973 if (n > 0) 1974 i += n - 1; 1975 goto nextrec; 1976 } 1977 1978 nofmt: 1979 if (act == DTRACEACT_PRINTA) { 1980 dt_print_aggdata_t pd; 1981 dtrace_aggvarid_t *aggvars; 1982 int j, naggvars = 0; 1983 size_t size = ((epd->dtepd_nrecs - i) * 1984 sizeof (dtrace_aggvarid_t)); 1985 1986 if ((aggvars = dt_alloc(dtp, size)) == NULL) 1987 return (-1); 1988 1989 /* 1990 * This might be a printa() with multiple 1991 * aggregation variables. We need to scan 1992 * forward through the records until we find 1993 * a record from a different statement. 
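				 *
				 * (For example, printa("%@d %@d\n", @a, @b)
				 * lays down one PRINTA record per
				 * aggregation, all sharing the same
				 * dtrd_uarg; the loop below gathers their
				 * variable IDs until that tag changes.)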
1994 */ 1995 for (j = i; j < epd->dtepd_nrecs; j++) { 1996 dtrace_recdesc_t *nrec; 1997 caddr_t naddr; 1998 1999 nrec = &epd->dtepd_rec[j]; 2000 2001 if (nrec->dtrd_uarg != rec->dtrd_uarg) 2002 break; 2003 2004 if (nrec->dtrd_action != act) { 2005 return (dt_set_errno(dtp, 2006 EDT_BADAGG)); 2007 } 2008 2009 naddr = buf->dtbd_data + offs + 2010 nrec->dtrd_offset; 2011 2012 aggvars[naggvars++] = 2013 /* LINTED - alignment */ 2014 *((dtrace_aggvarid_t *)naddr); 2015 } 2016 2017 i = j - 1; 2018 bzero(&pd, sizeof (pd)); 2019 pd.dtpa_dtp = dtp; 2020 pd.dtpa_fp = fp; 2021 2022 assert(naggvars >= 1); 2023 2024 if (naggvars == 1) { 2025 pd.dtpa_id = aggvars[0]; 2026 dt_free(dtp, aggvars); 2027 2028 if (dt_printf(dtp, fp, "\n") < 0 || 2029 dtrace_aggregate_walk_sorted(dtp, 2030 dt_print_agg, &pd) < 0) 2031 return (-1); 2032 goto nextrec; 2033 } 2034 2035 if (dt_printf(dtp, fp, "\n") < 0 || 2036 dtrace_aggregate_walk_joined(dtp, aggvars, 2037 naggvars, dt_print_aggs, &pd) < 0) { 2038 dt_free(dtp, aggvars); 2039 return (-1); 2040 } 2041 2042 dt_free(dtp, aggvars); 2043 goto nextrec; 2044 } 2045 2046 if (act == DTRACEACT_TRACEMEM) { 2047 if (tracememsize == 0 || 2048 tracememsize > rec->dtrd_size) { 2049 tracememsize = rec->dtrd_size; 2050 } 2051 2052 n = dt_print_bytes(dtp, fp, addr, 2053 tracememsize, 33, quiet, 1); 2054 2055 tracememsize = 0; 2056 2057 if (n < 0) 2058 return (-1); 2059 2060 goto nextrec; 2061 } 2062 2063 switch (rec->dtrd_size) { 2064 case sizeof (uint64_t): 2065 n = dt_printf(dtp, fp, 2066 quiet ? "%lld" : " %16lld", 2067 /* LINTED - alignment */ 2068 *((unsigned long long *)addr)); 2069 break; 2070 case sizeof (uint32_t): 2071 n = dt_printf(dtp, fp, quiet ? "%d" : " %8d", 2072 /* LINTED - alignment */ 2073 *((uint32_t *)addr)); 2074 break; 2075 case sizeof (uint16_t): 2076 n = dt_printf(dtp, fp, quiet ? "%d" : " %5d", 2077 /* LINTED - alignment */ 2078 *((uint16_t *)addr)); 2079 break; 2080 case sizeof (uint8_t): 2081 n = dt_printf(dtp, fp, quiet ? "%d" : " %3d", 2082 *((uint8_t *)addr)); 2083 break; 2084 default: 2085 n = dt_print_bytes(dtp, fp, addr, 2086 rec->dtrd_size, 33, quiet, 0); 2087 break; 2088 } 2089 2090 if (n < 0) 2091 return (-1); /* errno is set for us */ 2092 2093 nextrec: 2094 if (dt_buffered_flush(dtp, &data, rec, NULL, 0) < 0) 2095 return (-1); /* errno is set for us */ 2096 } 2097 2098 /* 2099 * Call the record callback with a NULL record to indicate 2100 * that we're done processing this EPID. 2101 */ 2102 rval = (*rfunc)(&data, NULL, arg); 2103 nextepid: 2104 offs += epd->dtepd_size; 2105 last = id; 2106 } 2107 2108 if (buf->dtbd_oldest != 0 && start == buf->dtbd_oldest) { 2109 end = buf->dtbd_oldest; 2110 start = 0; 2111 goto again; 2112 } 2113 2114 if ((drops = buf->dtbd_drops) == 0) 2115 return (0); 2116 2117 /* 2118 * Explicitly zero the drops to prevent us from processing them again. 
2119 */ 2120 buf->dtbd_drops = 0; 2121 2122 return (dt_handle_cpudrop(dtp, cpu, DTRACEDROP_PRINCIPAL, drops)); 2123 } 2124 2125 typedef struct dt_begin { 2126 dtrace_consume_probe_f *dtbgn_probefunc; 2127 dtrace_consume_rec_f *dtbgn_recfunc; 2128 void *dtbgn_arg; 2129 dtrace_handle_err_f *dtbgn_errhdlr; 2130 void *dtbgn_errarg; 2131 int dtbgn_beginonly; 2132 } dt_begin_t; 2133 2134 static int 2135 dt_consume_begin_probe(const dtrace_probedata_t *data, void *arg) 2136 { 2137 dt_begin_t *begin = (dt_begin_t *)arg; 2138 dtrace_probedesc_t *pd = data->dtpda_pdesc; 2139 2140 int r1 = (strcmp(pd->dtpd_provider, "dtrace") == 0); 2141 int r2 = (strcmp(pd->dtpd_name, "BEGIN") == 0); 2142 2143 if (begin->dtbgn_beginonly) { 2144 if (!(r1 && r2)) 2145 return (DTRACE_CONSUME_NEXT); 2146 } else { 2147 if (r1 && r2) 2148 return (DTRACE_CONSUME_NEXT); 2149 } 2150 2151 /* 2152 * We have a record that we're interested in. Now call the underlying 2153 * probe function... 2154 */ 2155 return (begin->dtbgn_probefunc(data, begin->dtbgn_arg)); 2156 } 2157 2158 static int 2159 dt_consume_begin_record(const dtrace_probedata_t *data, 2160 const dtrace_recdesc_t *rec, void *arg) 2161 { 2162 dt_begin_t *begin = (dt_begin_t *)arg; 2163 2164 return (begin->dtbgn_recfunc(data, rec, begin->dtbgn_arg)); 2165 } 2166 2167 static int 2168 dt_consume_begin_error(const dtrace_errdata_t *data, void *arg) 2169 { 2170 dt_begin_t *begin = (dt_begin_t *)arg; 2171 dtrace_probedesc_t *pd = data->dteda_pdesc; 2172 2173 int r1 = (strcmp(pd->dtpd_provider, "dtrace") == 0); 2174 int r2 = (strcmp(pd->dtpd_name, "BEGIN") == 0); 2175 2176 if (begin->dtbgn_beginonly) { 2177 if (!(r1 && r2)) 2178 return (DTRACE_HANDLE_OK); 2179 } else { 2180 if (r1 && r2) 2181 return (DTRACE_HANDLE_OK); 2182 } 2183 2184 return (begin->dtbgn_errhdlr(data, begin->dtbgn_errarg)); 2185 } 2186 2187 static int 2188 dt_consume_begin(dtrace_hdl_t *dtp, FILE *fp, dtrace_bufdesc_t *buf, 2189 dtrace_consume_probe_f *pf, dtrace_consume_rec_f *rf, void *arg) 2190 { 2191 /* 2192 * There's this idea that the BEGIN probe should be processed before 2193 * everything else, and that the END probe should be processed after 2194 * anything else. In the common case, this is pretty easy to deal 2195 * with. However, a situation may arise where the BEGIN enabling and 2196 * END enabling are on the same CPU, and some enabling in the middle 2197 * occurred on a different CPU. To deal with this (blech!) we need to 2198 * consume the BEGIN buffer up until the end of the BEGIN probe, and 2199 * then set it aside. We will then process every other CPU, and then 2200 * we'll return to the BEGIN CPU and process the rest of the data 2201 * (which will inevitably include the END probe, if any). Making this 2202 * even more complicated (!) is the library's ERROR enabling. Because 2203 * this enabling is processed before we even get into the consume call 2204 * back, any ERROR firing would result in the library's ERROR enabling 2205 * being processed twice -- once in our first pass (for BEGIN probes), 2206 * and again in our second pass (for everything but BEGIN probes). To 2207 * deal with this, we interpose on the ERROR handler to assure that we 2208 * only process ERROR enablings induced by BEGIN enablings in the 2209 * first pass, and that we only process ERROR enablings _not_ induced 2210 * by BEGIN enablings in the second pass. 
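	 *
	 * (Schematically, when tracing has stopped and the BEGIN and END
	 * records landed on the same CPU: pass one consumes that CPU's
	 * buffer keeping only BEGIN records and BEGIN-induced ERRORs, every
	 * other CPU is then consumed normally, and pass two reconsumes the
	 * first CPU while skipping BEGIN records and BEGIN-induced ERRORs.)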
static int
dt_consume_begin(dtrace_hdl_t *dtp, FILE *fp, dtrace_bufdesc_t *buf,
    dtrace_consume_probe_f *pf, dtrace_consume_rec_f *rf, void *arg)
{
	/*
	 * There's this idea that the BEGIN probe should be processed before
	 * everything else, and that the END probe should be processed after
	 * anything else. In the common case, this is pretty easy to deal
	 * with. However, a situation may arise where the BEGIN enabling and
	 * END enabling are on the same CPU, and some enabling in the middle
	 * occurred on a different CPU. To deal with this (blech!) we need to
	 * consume the BEGIN buffer up until the end of the BEGIN probe, and
	 * then set it aside. We will then process every other CPU, and then
	 * we'll return to the BEGIN CPU and process the rest of the data
	 * (which will inevitably include the END probe, if any). Making this
	 * even more complicated (!) is the library's ERROR enabling. Because
	 * this enabling is processed before we even get into the consume call
	 * back, any ERROR firing would result in the library's ERROR enabling
	 * being processed twice -- once in our first pass (for BEGIN probes),
	 * and again in our second pass (for everything but BEGIN probes). To
	 * deal with this, we interpose on the ERROR handler to assure that we
	 * only process ERROR enablings induced by BEGIN enablings in the
	 * first pass, and that we only process ERROR enablings _not_ induced
	 * by BEGIN enablings in the second pass.
	 */
	dt_begin_t begin;
	processorid_t cpu = dtp->dt_beganon;
	dtrace_bufdesc_t nbuf;
	int rval, i;
	static int max_ncpus;
	dtrace_optval_t size;

	dtp->dt_beganon = -1;

	if (dt_ioctl(dtp, DTRACEIOC_BUFSNAP, buf) == -1) {
		/*
		 * We really don't expect this to fail, but it is at least
		 * technically possible for this to fail with ENOENT. In this
		 * case, we just drive on...
		 */
		if (errno == ENOENT)
			return (0);

		return (dt_set_errno(dtp, errno));
	}

	if (!dtp->dt_stopped || buf->dtbd_cpu != dtp->dt_endedon) {
		/*
		 * This is the simple case. We're either not stopped, or if
		 * we are, we actually processed any END probes on another
		 * CPU. We can simply consume this buffer and return.
		 */
		return (dt_consume_cpu(dtp, fp, cpu, buf, pf, rf, arg));
	}

	begin.dtbgn_probefunc = pf;
	begin.dtbgn_recfunc = rf;
	begin.dtbgn_arg = arg;
	begin.dtbgn_beginonly = 1;

	/*
	 * We need to interpose on the ERROR handler to be sure that we
	 * only process ERRORs induced by BEGIN.
	 */
	begin.dtbgn_errhdlr = dtp->dt_errhdlr;
	begin.dtbgn_errarg = dtp->dt_errarg;
	dtp->dt_errhdlr = dt_consume_begin_error;
	dtp->dt_errarg = &begin;

	rval = dt_consume_cpu(dtp, fp, cpu, buf, dt_consume_begin_probe,
	    dt_consume_begin_record, &begin);

	dtp->dt_errhdlr = begin.dtbgn_errhdlr;
	dtp->dt_errarg = begin.dtbgn_errarg;

	if (rval != 0)
		return (rval);

	/*
	 * Now allocate a new buffer. We'll use this to deal with every other
	 * CPU.
	 */
	bzero(&nbuf, sizeof (dtrace_bufdesc_t));
	(void) dtrace_getopt(dtp, "bufsize", &size);
	if ((nbuf.dtbd_data = malloc(size)) == NULL)
		return (dt_set_errno(dtp, EDT_NOMEM));

	if (max_ncpus == 0)
		max_ncpus = dt_sysconf(dtp, _SC_CPUID_MAX) + 1;

	for (i = 0; i < max_ncpus; i++) {
		nbuf.dtbd_cpu = i;

		if (i == cpu)
			continue;

		if (dt_ioctl(dtp, DTRACEIOC_BUFSNAP, &nbuf) == -1) {
			/*
			 * If we failed with ENOENT, it may be because the
			 * CPU was unconfigured -- this is okay. Any other
			 * error, however, is unexpected.
			 */
			if (errno == ENOENT)
				continue;

			free(nbuf.dtbd_data);

			return (dt_set_errno(dtp, errno));
		}

		if ((rval = dt_consume_cpu(dtp, fp,
		    i, &nbuf, pf, rf, arg)) != 0) {
			free(nbuf.dtbd_data);
			return (rval);
		}
	}

	free(nbuf.dtbd_data);

	/*
	 * Okay -- we're done with the other buffers. Now we want to
	 * reconsume the first buffer -- but this time we're looking for
	 * everything _but_ BEGIN. And of course, in order to only consume
	 * those ERRORs _not_ associated with BEGIN, we need to reinstall our
	 * ERROR interposition function...
	 */
	begin.dtbgn_beginonly = 0;

	assert(begin.dtbgn_errhdlr == dtp->dt_errhdlr);
	assert(begin.dtbgn_errarg == dtp->dt_errarg);
	dtp->dt_errhdlr = dt_consume_begin_error;
	dtp->dt_errarg = &begin;

	rval = dt_consume_cpu(dtp, fp, cpu, buf, dt_consume_begin_probe,
	    dt_consume_begin_record, &begin);

	dtp->dt_errhdlr = begin.dtbgn_errhdlr;
	dtp->dt_errarg = begin.dtbgn_errarg;

	return (rval);
}
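
/*
 * Consume the principal buffers for all CPUs. Ordering matters here: the
 * CPU on which dtrace:::BEGIN fired (if any) is consumed first via
 * dt_consume_begin(), every other CPU is consumed next, and -- once tracing
 * has stopped -- the CPU on which dtrace:::END fired is consumed last.
 * Calls are also rate-limited according to the switchrate option.
 */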
int
dtrace_consume(dtrace_hdl_t *dtp, FILE *fp,
    dtrace_consume_probe_f *pf, dtrace_consume_rec_f *rf, void *arg)
{
	dtrace_bufdesc_t *buf = &dtp->dt_buf;
	dtrace_optval_t size;
	static int max_ncpus;
	int i, rval;
	dtrace_optval_t interval = dtp->dt_options[DTRACEOPT_SWITCHRATE];
	hrtime_t now = gethrtime();

	if (dtp->dt_lastswitch != 0) {
		if (now - dtp->dt_lastswitch < interval)
			return (0);

		dtp->dt_lastswitch += interval;
	} else {
		dtp->dt_lastswitch = now;
	}

	if (!dtp->dt_active)
		return (dt_set_errno(dtp, EINVAL));

	if (max_ncpus == 0)
		max_ncpus = dt_sysconf(dtp, _SC_CPUID_MAX) + 1;

	if (pf == NULL)
		pf = (dtrace_consume_probe_f *)dt_nullprobe;

	if (rf == NULL)
		rf = (dtrace_consume_rec_f *)dt_nullrec;

	if (buf->dtbd_data == NULL) {
		(void) dtrace_getopt(dtp, "bufsize", &size);
		if ((buf->dtbd_data = malloc(size)) == NULL)
			return (dt_set_errno(dtp, EDT_NOMEM));

		buf->dtbd_size = size;
	}

	/*
	 * If we have just begun, we want to first process the CPU that
	 * executed the BEGIN probe (if any).
	 */
	if (dtp->dt_active && dtp->dt_beganon != -1) {
		buf->dtbd_cpu = dtp->dt_beganon;
		if ((rval = dt_consume_begin(dtp, fp, buf, pf, rf, arg)) != 0)
			return (rval);
	}

	for (i = 0; i < max_ncpus; i++) {
		buf->dtbd_cpu = i;

		/*
		 * If we have stopped, we want to process the CPU on which the
		 * END probe was processed only _after_ we have processed
		 * everything else.
		 */
		if (dtp->dt_stopped && (i == dtp->dt_endedon))
			continue;

		if (dt_ioctl(dtp, DTRACEIOC_BUFSNAP, buf) == -1) {
			/*
			 * If we failed with ENOENT, it may be because the
			 * CPU was unconfigured -- this is okay. Any other
			 * error, however, is unexpected.
			 */
			if (errno == ENOENT)
				continue;

			return (dt_set_errno(dtp, errno));
		}

		if ((rval = dt_consume_cpu(dtp, fp, i, buf, pf, rf, arg)) != 0)
			return (rval);
	}

	if (!dtp->dt_stopped)
		return (0);

	buf->dtbd_cpu = dtp->dt_endedon;

	if (dt_ioctl(dtp, DTRACEIOC_BUFSNAP, buf) == -1) {
		/*
		 * This _really_ shouldn't fail, but it is strictly speaking
		 * possible for this to return ENOENT if the CPU that called
		 * the END enabling somehow managed to become unconfigured.
		 * It's unclear how the user can possibly expect anything
		 * rational to happen in this case -- the state has been thrown
		 * out along with the unconfigured CPU -- so we'll just drive
		 * on...
		 */
		if (errno == ENOENT)
			return (0);

		return (dt_set_errno(dtp, errno));
	}

	return (dt_consume_cpu(dtp, fp, dtp->dt_endedon, buf, pf, rf, arg));
}
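
/*
 * A minimal sketch of how a client might drive dtrace_consume() directly;
 * this is illustrative only -- dtrace_sleep() comes from the broader
 * libdtrace API, and the "done" flag stands in for whatever termination
 * condition the client maintains. Passing NULL callbacks causes the
 * substitutions to dt_nullprobe() and dt_nullrec() seen above:
 *
 *	do {
 *		dtrace_sleep(dtp);
 *
 *		if (dtrace_consume(dtp, stdout, NULL, NULL, NULL) == -1)
 *			break;
 *	} while (!done);
 */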