1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright 2009 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 */ 25 26 /* 27 * Copyright (c) 2013, Joyent, Inc. All rights reserved. 28 * Copyright (c) 2012 by Delphix. All rights reserved. 29 */ 30 31 #include <stdlib.h> 32 #include <strings.h> 33 #include <errno.h> 34 #include <unistd.h> 35 #include <limits.h> 36 #include <assert.h> 37 #include <ctype.h> 38 #include <alloca.h> 39 #include <dt_impl.h> 40 #include <dt_pq.h> 41 42 #define DT_MASK_LO 0x00000000FFFFFFFFULL 43 44 /* 45 * We declare this here because (1) we need it and (2) we want to avoid a 46 * dependency on libm in libdtrace. 47 */ 48 static long double 49 dt_fabsl(long double x) 50 { 51 if (x < 0) 52 return (-x); 53 54 return (x); 55 } 56 57 static int 58 dt_ndigits(long long val) 59 { 60 int rval = 1; 61 long long cmp = 10; 62 63 if (val < 0) { 64 val = val == INT64_MIN ? INT64_MAX : -val; 65 rval++; 66 } 67 68 while (val > cmp && cmp > 0) { 69 rval++; 70 cmp *= 10; 71 } 72 73 return (rval < 4 ? 4 : rval); 74 } 75 76 /* 77 * 128-bit arithmetic functions needed to support the stddev() aggregating 78 * action. 79 */ 80 static int 81 dt_gt_128(uint64_t *a, uint64_t *b) 82 { 83 return (a[1] > b[1] || (a[1] == b[1] && a[0] > b[0])); 84 } 85 86 static int 87 dt_ge_128(uint64_t *a, uint64_t *b) 88 { 89 return (a[1] > b[1] || (a[1] == b[1] && a[0] >= b[0])); 90 } 91 92 static int 93 dt_le_128(uint64_t *a, uint64_t *b) 94 { 95 return (a[1] < b[1] || (a[1] == b[1] && a[0] <= b[0])); 96 } 97 98 /* 99 * Shift the 128-bit value in a by b. If b is positive, shift left. 100 * If b is negative, shift right. 
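 *
 * The 128-bit value is stored as two 64-bit words, with a[0] holding the
 * low 64 bits and a[1] the high 64 bits.  As an illustrative example of
 * the two paths below: shifting { a[0] = 0x123, a[1] = 0 } left by 68
 * bits takes the b >= 64 path and yields { a[0] = 0, a[1] = 0x1230 },
 * while shifting it left by 4 takes the b < 64 path and yields
 * { a[0] = 0x1230, a[1] = 0 }, with any bits shifted out of a[0]
 * carried into the low bits of a[1].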
101 */ 102 static void 103 dt_shift_128(uint64_t *a, int b) 104 { 105 uint64_t mask; 106 107 if (b == 0) 108 return; 109 110 if (b < 0) { 111 b = -b; 112 if (b >= 64) { 113 a[0] = a[1] >> (b - 64); 114 a[1] = 0; 115 } else { 116 a[0] >>= b; 117 mask = 1LL << (64 - b); 118 mask -= 1; 119 a[0] |= ((a[1] & mask) << (64 - b)); 120 a[1] >>= b; 121 } 122 } else { 123 if (b >= 64) { 124 a[1] = a[0] << (b - 64); 125 a[0] = 0; 126 } else { 127 a[1] <<= b; 128 mask = a[0] >> (64 - b); 129 a[1] |= mask; 130 a[0] <<= b; 131 } 132 } 133 } 134 135 static int 136 dt_nbits_128(uint64_t *a) 137 { 138 int nbits = 0; 139 uint64_t tmp[2]; 140 uint64_t zero[2] = { 0, 0 }; 141 142 tmp[0] = a[0]; 143 tmp[1] = a[1]; 144 145 dt_shift_128(tmp, -1); 146 while (dt_gt_128(tmp, zero)) { 147 dt_shift_128(tmp, -1); 148 nbits++; 149 } 150 151 return (nbits); 152 } 153 154 static void 155 dt_subtract_128(uint64_t *minuend, uint64_t *subtrahend, uint64_t *difference) 156 { 157 uint64_t result[2]; 158 159 result[0] = minuend[0] - subtrahend[0]; 160 result[1] = minuend[1] - subtrahend[1] - 161 (minuend[0] < subtrahend[0] ? 1 : 0); 162 163 difference[0] = result[0]; 164 difference[1] = result[1]; 165 } 166 167 static void 168 dt_add_128(uint64_t *addend1, uint64_t *addend2, uint64_t *sum) 169 { 170 uint64_t result[2]; 171 172 result[0] = addend1[0] + addend2[0]; 173 result[1] = addend1[1] + addend2[1] + 174 (result[0] < addend1[0] || result[0] < addend2[0] ? 1 : 0); 175 176 sum[0] = result[0]; 177 sum[1] = result[1]; 178 } 179 180 /* 181 * The basic idea is to break the 2 64-bit values into 4 32-bit values, 182 * use native multiplication on those, and then re-combine into the 183 * resulting 128-bit value. 184 * 185 * (hi1 << 32 + lo1) * (hi2 << 32 + lo2) = 186 * hi1 * hi2 << 64 + 187 * hi1 * lo2 << 32 + 188 * hi2 * lo1 << 32 + 189 * lo1 * lo2 190 */ 191 static void 192 dt_multiply_128(uint64_t factor1, uint64_t factor2, uint64_t *product) 193 { 194 uint64_t hi1, hi2, lo1, lo2; 195 uint64_t tmp[2]; 196 197 hi1 = factor1 >> 32; 198 hi2 = factor2 >> 32; 199 200 lo1 = factor1 & DT_MASK_LO; 201 lo2 = factor2 & DT_MASK_LO; 202 203 product[0] = lo1 * lo2; 204 product[1] = hi1 * hi2; 205 206 tmp[0] = hi1 * lo2; 207 tmp[1] = 0; 208 dt_shift_128(tmp, 32); 209 dt_add_128(product, tmp, product); 210 211 tmp[0] = hi2 * lo1; 212 tmp[1] = 0; 213 dt_shift_128(tmp, 32); 214 dt_add_128(product, tmp, product); 215 } 216 217 /* 218 * This is long-hand division. 219 * 220 * We initialize subtrahend by shifting divisor left as far as possible. We 221 * loop, comparing subtrahend to dividend: if subtrahend is smaller, we 222 * subtract and set the appropriate bit in the result. We then shift 223 * subtrahend right by one bit for the next comparison. 
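 *
 * As a small worked example (in ordinary binary rather than 128 bits):
 * dividing 13 (1101) by 3 (11), the subtraction succeeds only at the
 * position weighted 4, so the quotient accumulates to 100 (4); the loop
 * stops once the remainder (1) drops below the divisor, and that
 * remainder is discarded.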
224 */ 225 static void 226 dt_divide_128(uint64_t *dividend, uint64_t divisor, uint64_t *quotient) 227 { 228 uint64_t result[2] = { 0, 0 }; 229 uint64_t remainder[2]; 230 uint64_t subtrahend[2]; 231 uint64_t divisor_128[2]; 232 uint64_t mask[2] = { 1, 0 }; 233 int log = 0; 234 235 assert(divisor != 0); 236 237 divisor_128[0] = divisor; 238 divisor_128[1] = 0; 239 240 remainder[0] = dividend[0]; 241 remainder[1] = dividend[1]; 242 243 subtrahend[0] = divisor; 244 subtrahend[1] = 0; 245 246 while (divisor > 0) { 247 log++; 248 divisor >>= 1; 249 } 250 251 dt_shift_128(subtrahend, 128 - log); 252 dt_shift_128(mask, 128 - log); 253 254 while (dt_ge_128(remainder, divisor_128)) { 255 if (dt_ge_128(remainder, subtrahend)) { 256 dt_subtract_128(remainder, subtrahend, remainder); 257 result[0] |= mask[0]; 258 result[1] |= mask[1]; 259 } 260 261 dt_shift_128(subtrahend, -1); 262 dt_shift_128(mask, -1); 263 } 264 265 quotient[0] = result[0]; 266 quotient[1] = result[1]; 267 } 268 269 /* 270 * This is the long-hand method of calculating a square root. 271 * The algorithm is as follows: 272 * 273 * 1. Group the digits by 2 from the right. 274 * 2. Over the leftmost group, find the largest single-digit number 275 * whose square is less than that group. 276 * 3. Subtract the result of the previous step (2 or 4, depending) and 277 * bring down the next two-digit group. 278 * 4. For the result R we have so far, find the largest single-digit number 279 * x such that 2 * R * 10 * x + x^2 is less than the result from step 3. 280 * (Note that this is doubling R and performing a decimal left-shift by 1 281 * and searching for the appropriate decimal to fill the one's place.) 282 * The value x is the next digit in the square root. 283 * Repeat steps 3 and 4 until the desired precision is reached. (We're 284 * dealing with integers, so the above is sufficient.) 285 * 286 * In decimal, the square root of 582,734 would be calculated as so: 287 * 288 * __7__6__3 289 * | 58 27 34 290 * -49 (7^2 == 49 => 7 is the first digit in the square root) 291 * -- 292 * 9 27 (Subtract and bring down the next group.) 293 * 146 8 76 (2 * 7 * 10 * 6 + 6^2 == 876 => 6 is the next digit in 294 * ----- the square root) 295 * 51 34 (Subtract and bring down the next group.) 296 * 1523 45 69 (2 * 76 * 10 * 3 + 3^2 == 4569 => 3 is the next digit in 297 * ----- the square root) 298 * 5 65 (remainder) 299 * 300 * The above algorithm applies similarly in binary, but note that the 301 * only possible non-zero value for x in step 4 is 1, so step 4 becomes a 302 * simple decision: is 2 * R * 2 * 1 + 1^2 (aka R << 2 + 1) less than the 303 * preceding difference? 304 * 305 * In binary, the square root of 11011011 would be calculated as so: 306 * 307 * __1__1__1__0 308 * | 11 01 10 11 309 * 01 (0 << 2 + 1 == 1 < 11 => this bit is 1) 310 * -- 311 * 10 01 10 11 312 * 101 1 01 (1 << 2 + 1 == 101 < 1001 => next bit is 1) 313 * ----- 314 * 1 00 10 11 315 * 1101 11 01 (11 << 2 + 1 == 1101 < 10010 => next bit is 1) 316 * ------- 317 * 1 01 11 318 * 11101 1 11 01 (111 << 2 + 1 == 11101 > 10111 => last bit is 0) 319 * 320 */ 321 static uint64_t 322 dt_sqrt_128(uint64_t *square) 323 { 324 uint64_t result[2] = { 0, 0 }; 325 uint64_t diff[2] = { 0, 0 }; 326 uint64_t one[2] = { 1, 0 }; 327 uint64_t next_pair[2]; 328 uint64_t next_try[2]; 329 uint64_t bit_pairs, pair_shift; 330 int i; 331 332 bit_pairs = dt_nbits_128(square) / 2; 333 pair_shift = bit_pairs * 2; 334 335 for (i = 0; i <= bit_pairs; i++) { 336 /* 337 * Bring down the next pair of bits. 
338 */ 339 next_pair[0] = square[0]; 340 next_pair[1] = square[1]; 341 dt_shift_128(next_pair, -pair_shift); 342 next_pair[0] &= 0x3; 343 next_pair[1] = 0; 344 345 dt_shift_128(diff, 2); 346 dt_add_128(diff, next_pair, diff); 347 348 /* 349 * next_try = R << 2 + 1 350 */ 351 next_try[0] = result[0]; 352 next_try[1] = result[1]; 353 dt_shift_128(next_try, 2); 354 dt_add_128(next_try, one, next_try); 355 356 if (dt_le_128(next_try, diff)) { 357 dt_subtract_128(diff, next_try, diff); 358 dt_shift_128(result, 1); 359 dt_add_128(result, one, result); 360 } else { 361 dt_shift_128(result, 1); 362 } 363 364 pair_shift -= 2; 365 } 366 367 assert(result[1] == 0); 368 369 return (result[0]); 370 } 371 372 uint64_t 373 dt_stddev(uint64_t *data, uint64_t normal) 374 { 375 uint64_t avg_of_squares[2]; 376 uint64_t square_of_avg[2]; 377 int64_t norm_avg; 378 uint64_t diff[2]; 379 380 /* 381 * The standard approximation for standard deviation is 382 * sqrt(average(x**2) - average(x)**2), i.e. the square root 383 * of the average of the squares minus the square of the average. 384 */ 385 dt_divide_128(data + 2, normal, avg_of_squares); 386 dt_divide_128(avg_of_squares, data[0], avg_of_squares); 387 388 norm_avg = (int64_t)data[1] / (int64_t)normal / (int64_t)data[0]; 389 390 if (norm_avg < 0) 391 norm_avg = -norm_avg; 392 393 dt_multiply_128((uint64_t)norm_avg, (uint64_t)norm_avg, square_of_avg); 394 395 dt_subtract_128(avg_of_squares, square_of_avg, diff); 396 397 return (dt_sqrt_128(diff)); 398 } 399 400 static int 401 dt_flowindent(dtrace_hdl_t *dtp, dtrace_probedata_t *data, dtrace_epid_t last, 402 dtrace_bufdesc_t *buf, size_t offs) 403 { 404 dtrace_probedesc_t *pd = data->dtpda_pdesc, *npd; 405 dtrace_eprobedesc_t *epd = data->dtpda_edesc, *nepd; 406 char *p = pd->dtpd_provider, *n = pd->dtpd_name, *sub; 407 dtrace_flowkind_t flow = DTRACEFLOW_NONE; 408 const char *str = NULL; 409 static const char *e_str[2] = { " -> ", " => " }; 410 static const char *r_str[2] = { " <- ", " <= " }; 411 static const char *ent = "entry", *ret = "return"; 412 static int entlen = 0, retlen = 0; 413 dtrace_epid_t next, id = epd->dtepd_epid; 414 int rval; 415 416 if (entlen == 0) { 417 assert(retlen == 0); 418 entlen = strlen(ent); 419 retlen = strlen(ret); 420 } 421 422 /* 423 * If the name of the probe is "entry" or ends with "-entry", we 424 * treat it as an entry; if it is "return" or ends with "-return", 425 * we treat it as a return. (This allows application-provided probes 426 * like "method-entry" or "function-entry" to participate in flow 427 * indentation -- without accidentally misinterpreting popular probe 428 * names like "carpentry", "gentry" or "Coventry".) 429 */ 430 if ((sub = strstr(n, ent)) != NULL && sub[entlen] == '\0' && 431 (sub == n || sub[-1] == '-')) { 432 flow = DTRACEFLOW_ENTRY; 433 str = e_str[strcmp(p, "syscall") == 0]; 434 } else if ((sub = strstr(n, ret)) != NULL && sub[retlen] == '\0' && 435 (sub == n || sub[-1] == '-')) { 436 flow = DTRACEFLOW_RETURN; 437 str = r_str[strcmp(p, "syscall") == 0]; 438 } 439 440 /* 441 * If we're going to indent this, we need to check the ID of our last 442 * call. If we're looking at the same probe ID but a different EPID, 443 * we _don't_ want to indent. (Yes, there are some minor holes in 444 * this scheme -- it's a heuristic.) 
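         *
         * For example, a script with two clauses enabled on the same entry
         * probe produces two EPIDs that share one probe ID; when the second
         * clause's record immediately follows the first, indenting again
         * would falsely suggest a deeper call, so we suppress the indent.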
445 */ 446 if (flow == DTRACEFLOW_ENTRY) { 447 if ((last != DTRACE_EPIDNONE && id != last && 448 pd->dtpd_id == dtp->dt_pdesc[last]->dtpd_id)) 449 flow = DTRACEFLOW_NONE; 450 } 451 452 /* 453 * If we're going to unindent this, it's more difficult to see if 454 * we don't actually want to unindent it -- we need to look at the 455 * _next_ EPID. 456 */ 457 if (flow == DTRACEFLOW_RETURN) { 458 offs += epd->dtepd_size; 459 460 do { 461 if (offs >= buf->dtbd_size) 462 goto out; 463 464 next = *(uint32_t *)((uintptr_t)buf->dtbd_data + offs); 465 466 if (next == DTRACE_EPIDNONE) 467 offs += sizeof (id); 468 } while (next == DTRACE_EPIDNONE); 469 470 if ((rval = dt_epid_lookup(dtp, next, &nepd, &npd)) != 0) 471 return (rval); 472 473 if (next != id && npd->dtpd_id == pd->dtpd_id) 474 flow = DTRACEFLOW_NONE; 475 } 476 477 out: 478 if (flow == DTRACEFLOW_ENTRY || flow == DTRACEFLOW_RETURN) { 479 data->dtpda_prefix = str; 480 } else { 481 data->dtpda_prefix = "| "; 482 } 483 484 if (flow == DTRACEFLOW_RETURN && data->dtpda_indent > 0) 485 data->dtpda_indent -= 2; 486 487 data->dtpda_flow = flow; 488 489 return (0); 490 } 491 492 static int 493 dt_nullprobe() 494 { 495 return (DTRACE_CONSUME_THIS); 496 } 497 498 static int 499 dt_nullrec() 500 { 501 return (DTRACE_CONSUME_NEXT); 502 } 503 504 static void 505 dt_quantize_total(dtrace_hdl_t *dtp, int64_t datum, long double *total) 506 { 507 long double val = dt_fabsl((long double)datum); 508 509 if (dtp->dt_options[DTRACEOPT_AGGZOOM] == DTRACEOPT_UNSET) { 510 *total += val; 511 return; 512 } 513 514 /* 515 * If we're zooming in on an aggregation, we want the height of the 516 * highest value to be approximately 95% of total bar height -- so we 517 * adjust up by the reciprocal of DTRACE_AGGZOOM_MAX when comparing to 518 * our highest value. 519 */ 520 val *= 1 / DTRACE_AGGZOOM_MAX; 521 522 if (*total < val) 523 *total = val; 524 } 525 526 static int 527 dt_print_quanthdr(dtrace_hdl_t *dtp, FILE *fp, int width) 528 { 529 return (dt_printf(dtp, fp, "\n%*s %41s %-9s\n", 530 width ? width : 16, width ? "key" : "value", 531 "------------- Distribution -------------", "count")); 532 } 533 534 static int 535 dt_print_quanthdr_packed(dtrace_hdl_t *dtp, FILE *fp, int width, 536 const dtrace_aggdata_t *aggdata, dtrace_actkind_t action) 537 { 538 int min = aggdata->dtada_minbin, max = aggdata->dtada_maxbin; 539 int minwidth, maxwidth, i; 540 541 assert(action == DTRACEAGG_QUANTIZE || action == DTRACEAGG_LQUANTIZE); 542 543 if (action == DTRACEAGG_QUANTIZE) { 544 if (min != 0 && min != DTRACE_QUANTIZE_ZEROBUCKET) 545 min--; 546 547 if (max < DTRACE_QUANTIZE_NBUCKETS - 1) 548 max++; 549 550 minwidth = dt_ndigits(DTRACE_QUANTIZE_BUCKETVAL(min)); 551 maxwidth = dt_ndigits(DTRACE_QUANTIZE_BUCKETVAL(max)); 552 } else { 553 maxwidth = 8; 554 minwidth = maxwidth - 1; 555 max++; 556 } 557 558 if (dt_printf(dtp, fp, "\n%*s %*s .", 559 width, width > 0 ? "key" : "", minwidth, "min") < 0) 560 return (-1); 561 562 for (i = min; i <= max; i++) { 563 if (dt_printf(dtp, fp, "-") < 0) 564 return (-1); 565 } 566 567 return (dt_printf(dtp, fp, ". %*s | count\n", -maxwidth, "max")); 568 } 569 570 /* 571 * We use a subset of the Unicode Block Elements (U+2588 through U+258F, 572 * inclusive) to represent aggregations via UTF-8 -- which are expressed via 573 * 3-byte UTF-8 sequences. 
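 *
 * For example, the full block (U+2588) is emitted as the three bytes
 * 0xe2 0x96 0x88:  DTRACE_AGGUTF8_BYTE0(0x2588) is 0xe0 | 0x02,
 * DTRACE_AGGUTF8_BYTE1(0x2588) is 0x80 | 0x16, and
 * DTRACE_AGGUTF8_BYTE2(0x2588) is 0x80 | 0x08.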
574 */ 575 #define DTRACE_AGGUTF8_FULL 0x2588 576 #define DTRACE_AGGUTF8_BASE 0x258f 577 #define DTRACE_AGGUTF8_LEVELS 8 578 579 #define DTRACE_AGGUTF8_BYTE0(val) (0xe0 | ((val) >> 12)) 580 #define DTRACE_AGGUTF8_BYTE1(val) (0x80 | (((val) >> 6) & 0x3f)) 581 #define DTRACE_AGGUTF8_BYTE2(val) (0x80 | ((val) & 0x3f)) 582 583 static int 584 dt_print_quantline_utf8(dtrace_hdl_t *dtp, FILE *fp, int64_t val, 585 uint64_t normal, long double total) 586 { 587 uint_t len = 40, i, whole, partial; 588 long double f = (dt_fabsl((long double)val) * len) / total; 589 const char *spaces = " "; 590 591 whole = (uint_t)f; 592 partial = (uint_t)((f - (long double)(uint_t)f) * 593 (long double)DTRACE_AGGUTF8_LEVELS); 594 595 if (dt_printf(dtp, fp, "|") < 0) 596 return (-1); 597 598 for (i = 0; i < whole; i++) { 599 if (dt_printf(dtp, fp, "%c%c%c", 600 DTRACE_AGGUTF8_BYTE0(DTRACE_AGGUTF8_FULL), 601 DTRACE_AGGUTF8_BYTE1(DTRACE_AGGUTF8_FULL), 602 DTRACE_AGGUTF8_BYTE2(DTRACE_AGGUTF8_FULL)) < 0) 603 return (-1); 604 } 605 606 if (partial != 0) { 607 partial = DTRACE_AGGUTF8_BASE - (partial - 1); 608 609 if (dt_printf(dtp, fp, "%c%c%c", 610 DTRACE_AGGUTF8_BYTE0(partial), 611 DTRACE_AGGUTF8_BYTE1(partial), 612 DTRACE_AGGUTF8_BYTE2(partial)) < 0) 613 return (-1); 614 615 i++; 616 } 617 618 return (dt_printf(dtp, fp, "%s %-9lld\n", spaces + i, 619 (long long)val / normal)); 620 } 621 622 static int 623 dt_print_quantline(dtrace_hdl_t *dtp, FILE *fp, int64_t val, 624 uint64_t normal, long double total, char positives, char negatives) 625 { 626 long double f; 627 uint_t depth, len = 40; 628 629 const char *ats = "@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@"; 630 const char *spaces = " "; 631 632 assert(strlen(ats) == len && strlen(spaces) == len); 633 assert(!(total == 0 && (positives || negatives))); 634 assert(!(val < 0 && !negatives)); 635 assert(!(val > 0 && !positives)); 636 assert(!(val != 0 && total == 0)); 637 638 if (!negatives) { 639 if (positives) { 640 if (dtp->dt_encoding == DT_ENCODING_UTF8) { 641 return (dt_print_quantline_utf8(dtp, fp, val, 642 normal, total)); 643 } 644 645 f = (dt_fabsl((long double)val) * len) / total; 646 depth = (uint_t)(f + 0.5); 647 } else { 648 depth = 0; 649 } 650 651 return (dt_printf(dtp, fp, "|%s%s %-9lld\n", ats + len - depth, 652 spaces + depth, (long long)val / normal)); 653 } 654 655 if (!positives) { 656 f = (dt_fabsl((long double)val) * len) / total; 657 depth = (uint_t)(f + 0.5); 658 659 return (dt_printf(dtp, fp, "%s%s| %-9lld\n", spaces + depth, 660 ats + len - depth, (long long)val / normal)); 661 } 662 663 /* 664 * If we're here, we have both positive and negative bucket values. 665 * To express this graphically, we're going to generate both positive 666 * and negative bars separated by a centerline. These bars are half 667 * the size of normal quantize()/lquantize() bars, so we divide the 668 * length in half before calculating the bar length. 
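         *
         * For example, with the default length of 40, a bucket whose
         * magnitude is half of the largest gets a 10-character bar;
         * negative bars extend to the left of the centerline and positive
         * bars to the right of it.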
 */
        len /= 2;
        ats = &ats[len];
        spaces = &spaces[len];

        f = (dt_fabsl((long double)val) * len) / total;
        depth = (uint_t)(f + 0.5);

        if (val <= 0) {
                return (dt_printf(dtp, fp, "%s%s|%*s %-9lld\n", spaces + depth,
                    ats + len - depth, len, "", (long long)val / normal));
        } else {
                return (dt_printf(dtp, fp, "%20s|%s%s %-9lld\n", "",
                    ats + len - depth, spaces + depth,
                    (long long)val / normal));
        }
}

/*
 * As with UTF-8 printing of aggregations, we use a subset of the Unicode
 * Block Elements (U+2581 through U+2588, inclusive) to represent our packed
 * aggregation.
 */
#define DTRACE_AGGPACK_BASE     0x2581
#define DTRACE_AGGPACK_LEVELS   8

static int
dt_print_packed(dtrace_hdl_t *dtp, FILE *fp,
    long double datum, long double total)
{
        static boolean_t utf8_checked = B_FALSE;
        static boolean_t utf8;
        char *ascii = "__xxxxXX";
        char *neg = "vvvvVV";
        unsigned int len;
        long double val;

        if (!utf8_checked) {
                char *term;

                /*
                 * We want to determine if we can reasonably emit UTF-8 for
                 * our packed aggregation.  To do this, we check for terminal
                 * types that are known to be too primitive to render UTF-8,
                 * and fall back to ASCII on those.
                 */
                utf8_checked = B_TRUE;

                if (dtp->dt_encoding == DT_ENCODING_ASCII) {
                        utf8 = B_FALSE;
                } else if (dtp->dt_encoding == DT_ENCODING_UTF8) {
                        utf8 = B_TRUE;
                } else if ((term = getenv("TERM")) != NULL &&
                    (strcmp(term, "sun") == 0 ||
                    strcmp(term, "sun-color") == 0 ||
                    strcmp(term, "dumb") == 0)) {
                        utf8 = B_FALSE;
                } else {
                        utf8 = B_TRUE;
                }
        }

        if (datum == 0)
                return (dt_printf(dtp, fp, " "));

        if (datum < 0) {
                len = strlen(neg);
                val = dt_fabsl(datum * (len - 1)) / total;
                return (dt_printf(dtp, fp, "%c", neg[(uint_t)(val + 0.5)]));
        }

        if (utf8) {
                int block = DTRACE_AGGPACK_BASE + (unsigned int)(((datum *
                    (DTRACE_AGGPACK_LEVELS - 1)) / total) + 0.5);

                return (dt_printf(dtp, fp, "%c%c%c",
                    DTRACE_AGGUTF8_BYTE0(block),
                    DTRACE_AGGUTF8_BYTE1(block),
                    DTRACE_AGGUTF8_BYTE2(block)));
        }

        len = strlen(ascii);
        val = (datum * (len - 1)) / total;
        return (dt_printf(dtp, fp, "%c", ascii[(uint_t)(val + 0.5)]));
}

int
dt_print_quantize(dtrace_hdl_t *dtp, FILE *fp, const void *addr,
    size_t size, uint64_t normal)
{
        const int64_t *data = addr;
        int i, first_bin = 0, last_bin = DTRACE_QUANTIZE_NBUCKETS - 1;
        long double total = 0;
        char positives = 0, negatives = 0;

        if (size != DTRACE_QUANTIZE_NBUCKETS * sizeof (uint64_t))
                return (dt_set_errno(dtp, EDT_DMISMATCH));

        while (first_bin < DTRACE_QUANTIZE_NBUCKETS - 1 && data[first_bin] == 0)
                first_bin++;

        if (first_bin == DTRACE_QUANTIZE_NBUCKETS - 1) {
                /*
                 * There isn't any data. This is possible if the aggregation
                 * has been clear()'d or if negative increment values have been
                 * used. Regardless, we'll print the buckets around 0.
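                 *
                 * (That is, we display the buckets for the values -1, 0 and
                 * 1, i.e. DTRACE_QUANTIZE_ZEROBUCKET - 1 through
                 * DTRACE_QUANTIZE_ZEROBUCKET + 1, each with a zero count.)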
774 */ 775 first_bin = DTRACE_QUANTIZE_ZEROBUCKET - 1; 776 last_bin = DTRACE_QUANTIZE_ZEROBUCKET + 1; 777 } else { 778 if (first_bin > 0) 779 first_bin--; 780 781 while (last_bin > 0 && data[last_bin] == 0) 782 last_bin--; 783 784 if (last_bin < DTRACE_QUANTIZE_NBUCKETS - 1) 785 last_bin++; 786 } 787 788 for (i = first_bin; i <= last_bin; i++) { 789 positives |= (data[i] > 0); 790 negatives |= (data[i] < 0); 791 dt_quantize_total(dtp, data[i], &total); 792 } 793 794 if (dt_print_quanthdr(dtp, fp, 0) < 0) 795 return (-1); 796 797 for (i = first_bin; i <= last_bin; i++) { 798 if (dt_printf(dtp, fp, "%16lld ", 799 (long long)DTRACE_QUANTIZE_BUCKETVAL(i)) < 0) 800 return (-1); 801 802 if (dt_print_quantline(dtp, fp, data[i], normal, total, 803 positives, negatives) < 0) 804 return (-1); 805 } 806 807 return (0); 808 } 809 810 int 811 dt_print_quantize_packed(dtrace_hdl_t *dtp, FILE *fp, const void *addr, 812 size_t size, const dtrace_aggdata_t *aggdata) 813 { 814 const int64_t *data = addr; 815 long double total = 0, count = 0; 816 int min = aggdata->dtada_minbin, max = aggdata->dtada_maxbin, i; 817 int64_t minval, maxval; 818 819 if (size != DTRACE_QUANTIZE_NBUCKETS * sizeof (uint64_t)) 820 return (dt_set_errno(dtp, EDT_DMISMATCH)); 821 822 if (min != 0 && min != DTRACE_QUANTIZE_ZEROBUCKET) 823 min--; 824 825 if (max < DTRACE_QUANTIZE_NBUCKETS - 1) 826 max++; 827 828 minval = DTRACE_QUANTIZE_BUCKETVAL(min); 829 maxval = DTRACE_QUANTIZE_BUCKETVAL(max); 830 831 if (dt_printf(dtp, fp, " %*lld :", dt_ndigits(minval), 832 (long long)minval) < 0) 833 return (-1); 834 835 for (i = min; i <= max; i++) { 836 dt_quantize_total(dtp, data[i], &total); 837 count += data[i]; 838 } 839 840 for (i = min; i <= max; i++) { 841 if (dt_print_packed(dtp, fp, data[i], total) < 0) 842 return (-1); 843 } 844 845 if (dt_printf(dtp, fp, ": %*lld | %lld\n", 846 -dt_ndigits(maxval), (long long)maxval, (long long)count) < 0) 847 return (-1); 848 849 return (0); 850 } 851 852 int 853 dt_print_lquantize(dtrace_hdl_t *dtp, FILE *fp, const void *addr, 854 size_t size, uint64_t normal) 855 { 856 const int64_t *data = addr; 857 int i, first_bin, last_bin, base; 858 uint64_t arg; 859 long double total = 0; 860 uint16_t step, levels; 861 char positives = 0, negatives = 0; 862 863 if (size < sizeof (uint64_t)) 864 return (dt_set_errno(dtp, EDT_DMISMATCH)); 865 866 arg = *data++; 867 size -= sizeof (uint64_t); 868 869 base = DTRACE_LQUANTIZE_BASE(arg); 870 step = DTRACE_LQUANTIZE_STEP(arg); 871 levels = DTRACE_LQUANTIZE_LEVELS(arg); 872 873 first_bin = 0; 874 last_bin = levels + 1; 875 876 if (size != sizeof (uint64_t) * (levels + 2)) 877 return (dt_set_errno(dtp, EDT_DMISMATCH)); 878 879 while (first_bin <= levels + 1 && data[first_bin] == 0) 880 first_bin++; 881 882 if (first_bin > levels + 1) { 883 first_bin = 0; 884 last_bin = 2; 885 } else { 886 if (first_bin > 0) 887 first_bin--; 888 889 while (last_bin > 0 && data[last_bin] == 0) 890 last_bin--; 891 892 if (last_bin < levels + 1) 893 last_bin++; 894 } 895 896 for (i = first_bin; i <= last_bin; i++) { 897 positives |= (data[i] > 0); 898 negatives |= (data[i] < 0); 899 dt_quantize_total(dtp, data[i], &total); 900 } 901 902 if (dt_printf(dtp, fp, "\n%16s %41s %-9s\n", "value", 903 "------------- Distribution -------------", "count") < 0) 904 return (-1); 905 906 for (i = first_bin; i <= last_bin; i++) { 907 char c[32]; 908 int err; 909 910 if (i == 0) { 911 (void) snprintf(c, sizeof (c), "< %d", base); 912 err = dt_printf(dtp, fp, "%16s ", c); 913 } else if (i == levels + 1) { 
914 (void) snprintf(c, sizeof (c), ">= %d", 915 base + (levels * step)); 916 err = dt_printf(dtp, fp, "%16s ", c); 917 } else { 918 err = dt_printf(dtp, fp, "%16d ", 919 base + (i - 1) * step); 920 } 921 922 if (err < 0 || dt_print_quantline(dtp, fp, data[i], normal, 923 total, positives, negatives) < 0) 924 return (-1); 925 } 926 927 return (0); 928 } 929 930 /*ARGSUSED*/ 931 int 932 dt_print_lquantize_packed(dtrace_hdl_t *dtp, FILE *fp, const void *addr, 933 size_t size, const dtrace_aggdata_t *aggdata) 934 { 935 const int64_t *data = addr; 936 long double total = 0, count = 0; 937 int min, max, base, err; 938 uint64_t arg; 939 uint16_t step, levels; 940 char c[32]; 941 unsigned int i; 942 943 if (size < sizeof (uint64_t)) 944 return (dt_set_errno(dtp, EDT_DMISMATCH)); 945 946 arg = *data++; 947 size -= sizeof (uint64_t); 948 949 base = DTRACE_LQUANTIZE_BASE(arg); 950 step = DTRACE_LQUANTIZE_STEP(arg); 951 levels = DTRACE_LQUANTIZE_LEVELS(arg); 952 953 if (size != sizeof (uint64_t) * (levels + 2)) 954 return (dt_set_errno(dtp, EDT_DMISMATCH)); 955 956 min = 0; 957 max = levels + 1; 958 959 if (min == 0) { 960 (void) snprintf(c, sizeof (c), "< %d", base); 961 err = dt_printf(dtp, fp, "%8s :", c); 962 } else { 963 err = dt_printf(dtp, fp, "%8d :", base + (min - 1) * step); 964 } 965 966 if (err < 0) 967 return (-1); 968 969 for (i = min; i <= max; i++) { 970 dt_quantize_total(dtp, data[i], &total); 971 count += data[i]; 972 } 973 974 for (i = min; i <= max; i++) { 975 if (dt_print_packed(dtp, fp, data[i], total) < 0) 976 return (-1); 977 } 978 979 (void) snprintf(c, sizeof (c), ">= %d", base + (levels * step)); 980 return (dt_printf(dtp, fp, ": %-8s | %lld\n", c, (long long)count)); 981 } 982 983 int 984 dt_print_llquantize(dtrace_hdl_t *dtp, FILE *fp, const void *addr, 985 size_t size, uint64_t normal) 986 { 987 int i, first_bin, last_bin, bin = 1, order, levels; 988 uint16_t factor, low, high, nsteps; 989 const int64_t *data = addr; 990 int64_t value = 1, next, step; 991 char positives = 0, negatives = 0; 992 long double total = 0; 993 uint64_t arg; 994 char c[32]; 995 996 if (size < sizeof (uint64_t)) 997 return (dt_set_errno(dtp, EDT_DMISMATCH)); 998 999 arg = *data++; 1000 size -= sizeof (uint64_t); 1001 1002 factor = DTRACE_LLQUANTIZE_FACTOR(arg); 1003 low = DTRACE_LLQUANTIZE_LOW(arg); 1004 high = DTRACE_LLQUANTIZE_HIGH(arg); 1005 nsteps = DTRACE_LLQUANTIZE_NSTEP(arg); 1006 1007 /* 1008 * We don't expect to be handed invalid llquantize() parameters here, 1009 * but sanity check them (to a degree) nonetheless. 
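         *
         * For example, llquantize(x, 10, 0, 6, 20) arrives here with factor
         * 10, low 0, high 6 and nsteps 20: twenty linearly-spaced buckets
         * per order of magnitude starting at 10^0, plus an underflow bucket
         * below the covered range and an overflow bucket above it.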
1010 */ 1011 if (size > INT32_MAX || factor < 2 || low >= high || 1012 nsteps == 0 || factor > nsteps) 1013 return (dt_set_errno(dtp, EDT_DMISMATCH)); 1014 1015 levels = (int)size / sizeof (uint64_t); 1016 1017 first_bin = 0; 1018 last_bin = levels - 1; 1019 1020 while (first_bin < levels && data[first_bin] == 0) 1021 first_bin++; 1022 1023 if (first_bin == levels) { 1024 first_bin = 0; 1025 last_bin = 1; 1026 } else { 1027 if (first_bin > 0) 1028 first_bin--; 1029 1030 while (last_bin > 0 && data[last_bin] == 0) 1031 last_bin--; 1032 1033 if (last_bin < levels - 1) 1034 last_bin++; 1035 } 1036 1037 for (i = first_bin; i <= last_bin; i++) { 1038 positives |= (data[i] > 0); 1039 negatives |= (data[i] < 0); 1040 dt_quantize_total(dtp, data[i], &total); 1041 } 1042 1043 if (dt_printf(dtp, fp, "\n%16s %41s %-9s\n", "value", 1044 "------------- Distribution -------------", "count") < 0) 1045 return (-1); 1046 1047 for (order = 0; order < low; order++) 1048 value *= factor; 1049 1050 next = value * factor; 1051 step = next > nsteps ? next / nsteps : 1; 1052 1053 if (first_bin == 0) { 1054 (void) snprintf(c, sizeof (c), "< %lld", value); 1055 1056 if (dt_printf(dtp, fp, "%16s ", c) < 0) 1057 return (-1); 1058 1059 if (dt_print_quantline(dtp, fp, data[0], normal, 1060 total, positives, negatives) < 0) 1061 return (-1); 1062 } 1063 1064 while (order <= high) { 1065 if (bin >= first_bin && bin <= last_bin) { 1066 if (dt_printf(dtp, fp, "%16lld ", (long long)value) < 0) 1067 return (-1); 1068 1069 if (dt_print_quantline(dtp, fp, data[bin], 1070 normal, total, positives, negatives) < 0) 1071 return (-1); 1072 } 1073 1074 assert(value < next); 1075 bin++; 1076 1077 if ((value += step) != next) 1078 continue; 1079 1080 next = value * factor; 1081 step = next > nsteps ? next / nsteps : 1; 1082 order++; 1083 } 1084 1085 if (last_bin < bin) 1086 return (0); 1087 1088 assert(last_bin == bin); 1089 (void) snprintf(c, sizeof (c), ">= %lld", value); 1090 1091 if (dt_printf(dtp, fp, "%16s ", c) < 0) 1092 return (-1); 1093 1094 return (dt_print_quantline(dtp, fp, data[bin], normal, 1095 total, positives, negatives)); 1096 } 1097 1098 /*ARGSUSED*/ 1099 static int 1100 dt_print_average(dtrace_hdl_t *dtp, FILE *fp, caddr_t addr, 1101 size_t size, uint64_t normal) 1102 { 1103 /* LINTED - alignment */ 1104 int64_t *data = (int64_t *)addr; 1105 1106 return (dt_printf(dtp, fp, " %16lld", data[0] ? 1107 (long long)(data[1] / (int64_t)normal / data[0]) : 0)); 1108 } 1109 1110 /*ARGSUSED*/ 1111 static int 1112 dt_print_stddev(dtrace_hdl_t *dtp, FILE *fp, caddr_t addr, 1113 size_t size, uint64_t normal) 1114 { 1115 /* LINTED - alignment */ 1116 uint64_t *data = (uint64_t *)addr; 1117 1118 return (dt_printf(dtp, fp, " %16llu", data[0] ? 1119 (unsigned long long) dt_stddev(data, normal) : 0)); 1120 } 1121 1122 /*ARGSUSED*/ 1123 static int 1124 dt_print_bytes(dtrace_hdl_t *dtp, FILE *fp, caddr_t addr, 1125 size_t nbytes, int width, int quiet, int forceraw) 1126 { 1127 /* 1128 * If the byte stream is a series of printable characters, followed by 1129 * a terminating byte, we print it out as a string. Otherwise, we 1130 * assume that it's something else and just print the bytes. 
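         *
         * In the latter case the output is a conventional hex dump: a header
         * row labeling the sixteen byte columns 0 through f, then one row
         * per sixteen bytes of data showing the starting offset, the bytes
         * in hex, and their printable rendering (with '.' standing in for
         * anything outside the range ' ' through '~').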
1131 */ 1132 int i, j, margin = 5; 1133 char *c = (char *)addr; 1134 1135 if (nbytes == 0) 1136 return (0); 1137 1138 if (forceraw) 1139 goto raw; 1140 1141 if (dtp->dt_options[DTRACEOPT_RAWBYTES] != DTRACEOPT_UNSET) 1142 goto raw; 1143 1144 for (i = 0; i < nbytes; i++) { 1145 /* 1146 * We define a "printable character" to be one for which 1147 * isprint(3C) returns non-zero, isspace(3C) returns non-zero, 1148 * or a character which is either backspace or the bell. 1149 * Backspace and the bell are regrettably special because 1150 * they fail the first two tests -- and yet they are entirely 1151 * printable. These are the only two control characters that 1152 * have meaning for the terminal and for which isprint(3C) and 1153 * isspace(3C) return 0. 1154 */ 1155 if (isprint(c[i]) || isspace(c[i]) || 1156 c[i] == '\b' || c[i] == '\a') 1157 continue; 1158 1159 if (c[i] == '\0' && i > 0) { 1160 /* 1161 * This looks like it might be a string. Before we 1162 * assume that it is indeed a string, check the 1163 * remainder of the byte range; if it contains 1164 * additional non-nul characters, we'll assume that 1165 * it's a binary stream that just happens to look like 1166 * a string, and we'll print out the individual bytes. 1167 */ 1168 for (j = i + 1; j < nbytes; j++) { 1169 if (c[j] != '\0') 1170 break; 1171 } 1172 1173 if (j != nbytes) 1174 break; 1175 1176 if (quiet) { 1177 return (dt_printf(dtp, fp, "%s", c)); 1178 } else { 1179 return (dt_printf(dtp, fp, " %s%*s", 1180 width < 0 ? " " : "", width, c)); 1181 } 1182 } 1183 1184 break; 1185 } 1186 1187 if (i == nbytes) { 1188 /* 1189 * The byte range is all printable characters, but there is 1190 * no trailing nul byte. We'll assume that it's a string and 1191 * print it as such. 1192 */ 1193 char *s = alloca(nbytes + 1); 1194 bcopy(c, s, nbytes); 1195 s[nbytes] = '\0'; 1196 return (dt_printf(dtp, fp, " %-*s", width, s)); 1197 } 1198 1199 raw: 1200 if (dt_printf(dtp, fp, "\n%*s ", margin, "") < 0) 1201 return (-1); 1202 1203 for (i = 0; i < 16; i++) 1204 if (dt_printf(dtp, fp, " %c", "0123456789abcdef"[i]) < 0) 1205 return (-1); 1206 1207 if (dt_printf(dtp, fp, " 0123456789abcdef\n") < 0) 1208 return (-1); 1209 1210 1211 for (i = 0; i < nbytes; i += 16) { 1212 if (dt_printf(dtp, fp, "%*s%5x:", margin, "", i) < 0) 1213 return (-1); 1214 1215 for (j = i; j < i + 16 && j < nbytes; j++) { 1216 if (dt_printf(dtp, fp, " %02x", (uchar_t)c[j]) < 0) 1217 return (-1); 1218 } 1219 1220 while (j++ % 16) { 1221 if (dt_printf(dtp, fp, " ") < 0) 1222 return (-1); 1223 } 1224 1225 if (dt_printf(dtp, fp, " ") < 0) 1226 return (-1); 1227 1228 for (j = i; j < i + 16 && j < nbytes; j++) { 1229 if (dt_printf(dtp, fp, "%c", 1230 c[j] < ' ' || c[j] > '~' ? '.' 
: c[j]) < 0) 1231 return (-1); 1232 } 1233 1234 if (dt_printf(dtp, fp, "\n") < 0) 1235 return (-1); 1236 } 1237 1238 return (0); 1239 } 1240 1241 int 1242 dt_print_stack(dtrace_hdl_t *dtp, FILE *fp, const char *format, 1243 caddr_t addr, int depth, int size) 1244 { 1245 dtrace_syminfo_t dts; 1246 GElf_Sym sym; 1247 int i, indent; 1248 char c[PATH_MAX * 2]; 1249 uint64_t pc; 1250 1251 if (dt_printf(dtp, fp, "\n") < 0) 1252 return (-1); 1253 1254 if (format == NULL) 1255 format = "%s"; 1256 1257 if (dtp->dt_options[DTRACEOPT_STACKINDENT] != DTRACEOPT_UNSET) 1258 indent = (int)dtp->dt_options[DTRACEOPT_STACKINDENT]; 1259 else 1260 indent = _dtrace_stkindent; 1261 1262 for (i = 0; i < depth; i++) { 1263 switch (size) { 1264 case sizeof (uint32_t): 1265 /* LINTED - alignment */ 1266 pc = *((uint32_t *)addr); 1267 break; 1268 1269 case sizeof (uint64_t): 1270 /* LINTED - alignment */ 1271 pc = *((uint64_t *)addr); 1272 break; 1273 1274 default: 1275 return (dt_set_errno(dtp, EDT_BADSTACKPC)); 1276 } 1277 1278 if (pc == NULL) 1279 break; 1280 1281 addr += size; 1282 1283 if (dt_printf(dtp, fp, "%*s", indent, "") < 0) 1284 return (-1); 1285 1286 if (dtrace_lookup_by_addr(dtp, pc, &sym, &dts) == 0) { 1287 if (pc > sym.st_value) { 1288 (void) snprintf(c, sizeof (c), "%s`%s+0x%llx", 1289 dts.dts_object, dts.dts_name, 1290 pc - sym.st_value); 1291 } else { 1292 (void) snprintf(c, sizeof (c), "%s`%s", 1293 dts.dts_object, dts.dts_name); 1294 } 1295 } else { 1296 /* 1297 * We'll repeat the lookup, but this time we'll specify 1298 * a NULL GElf_Sym -- indicating that we're only 1299 * interested in the containing module. 1300 */ 1301 if (dtrace_lookup_by_addr(dtp, pc, NULL, &dts) == 0) { 1302 (void) snprintf(c, sizeof (c), "%s`0x%llx", 1303 dts.dts_object, pc); 1304 } else { 1305 (void) snprintf(c, sizeof (c), "0x%llx", pc); 1306 } 1307 } 1308 1309 if (dt_printf(dtp, fp, format, c) < 0) 1310 return (-1); 1311 1312 if (dt_printf(dtp, fp, "\n") < 0) 1313 return (-1); 1314 } 1315 1316 return (0); 1317 } 1318 1319 int 1320 dt_print_ustack(dtrace_hdl_t *dtp, FILE *fp, const char *format, 1321 caddr_t addr, uint64_t arg) 1322 { 1323 /* LINTED - alignment */ 1324 uint64_t *pc = (uint64_t *)addr; 1325 uint32_t depth = DTRACE_USTACK_NFRAMES(arg); 1326 uint32_t strsize = DTRACE_USTACK_STRSIZE(arg); 1327 const char *strbase = addr + (depth + 1) * sizeof (uint64_t); 1328 const char *str = strsize ? strbase : NULL; 1329 int err = 0; 1330 1331 char name[PATH_MAX], objname[PATH_MAX], c[PATH_MAX * 2]; 1332 struct ps_prochandle *P; 1333 GElf_Sym sym; 1334 int i, indent; 1335 pid_t pid; 1336 1337 if (depth == 0) 1338 return (0); 1339 1340 pid = (pid_t)*pc++; 1341 1342 if (dt_printf(dtp, fp, "\n") < 0) 1343 return (-1); 1344 1345 if (format == NULL) 1346 format = "%s"; 1347 1348 if (dtp->dt_options[DTRACEOPT_STACKINDENT] != DTRACEOPT_UNSET) 1349 indent = (int)dtp->dt_options[DTRACEOPT_STACKINDENT]; 1350 else 1351 indent = _dtrace_stkindent; 1352 1353 /* 1354 * Ultimately, we need to add an entry point in the library vector for 1355 * determining <symbol, offset> from <pid, address>. For now, if 1356 * this is a vector open, we just print the raw address or string. 
1357 */ 1358 if (dtp->dt_vector == NULL) 1359 P = dt_proc_grab(dtp, pid, PGRAB_RDONLY | PGRAB_FORCE, 0); 1360 else 1361 P = NULL; 1362 1363 if (P != NULL) 1364 dt_proc_lock(dtp, P); /* lock handle while we perform lookups */ 1365 1366 for (i = 0; i < depth && pc[i] != NULL; i++) { 1367 const prmap_t *map; 1368 1369 if ((err = dt_printf(dtp, fp, "%*s", indent, "")) < 0) 1370 break; 1371 1372 if (P != NULL && Plookup_by_addr(P, pc[i], 1373 name, sizeof (name), &sym) == 0) { 1374 (void) Pobjname(P, pc[i], objname, sizeof (objname)); 1375 1376 if (pc[i] > sym.st_value) { 1377 (void) snprintf(c, sizeof (c), 1378 "%s`%s+0x%llx", dt_basename(objname), name, 1379 (u_longlong_t)(pc[i] - sym.st_value)); 1380 } else { 1381 (void) snprintf(c, sizeof (c), 1382 "%s`%s", dt_basename(objname), name); 1383 } 1384 } else if (str != NULL && str[0] != '\0' && str[0] != '@' && 1385 (P == NULL || (map = Paddr_to_map(P, pc[i])) == NULL || 1386 map->pr_mflags & MA_WRITE)) { 1387 /* 1388 * If the current string pointer in the string table 1389 * does not point to an empty string _and_ the program 1390 * counter falls in a writable region, we'll use the 1391 * string from the string table instead of the raw 1392 * address. This last condition is necessary because 1393 * some (broken) ustack helpers will return a string 1394 * even for a program counter that they can't 1395 * identify. If we have a string for a program 1396 * counter that falls in a segment that isn't 1397 * writable, we assume that we have fallen into this 1398 * case and we refuse to use the string. Finally, 1399 * note that if we could not grab the process (e.g., 1400 * because it exited), the information from the helper 1401 * is better than nothing. 1402 */ 1403 (void) snprintf(c, sizeof (c), "%s", str); 1404 } else { 1405 if (P != NULL && Pobjname(P, pc[i], objname, 1406 sizeof (objname)) != NULL) { 1407 (void) snprintf(c, sizeof (c), "%s`0x%llx", 1408 dt_basename(objname), (u_longlong_t)pc[i]); 1409 } else { 1410 (void) snprintf(c, sizeof (c), "0x%llx", 1411 (u_longlong_t)pc[i]); 1412 } 1413 } 1414 1415 if ((err = dt_printf(dtp, fp, format, c)) < 0) 1416 break; 1417 1418 if ((err = dt_printf(dtp, fp, "\n")) < 0) 1419 break; 1420 1421 if (str != NULL && str[0] == '@') { 1422 /* 1423 * If the first character of the string is an "at" sign, 1424 * then the string is inferred to be an annotation -- 1425 * and it is printed out beneath the frame and offset 1426 * with brackets. 
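                         *
                         * For example, a helper that returns the
                         * (hypothetical) string "@anonymous closure" for a
                         * frame causes " [ anonymous closure ]" to be
                         * printed, indented, on the line below that frame.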
1427 */ 1428 if ((err = dt_printf(dtp, fp, "%*s", indent, "")) < 0) 1429 break; 1430 1431 (void) snprintf(c, sizeof (c), " [ %s ]", &str[1]); 1432 1433 if ((err = dt_printf(dtp, fp, format, c)) < 0) 1434 break; 1435 1436 if ((err = dt_printf(dtp, fp, "\n")) < 0) 1437 break; 1438 } 1439 1440 if (str != NULL) { 1441 str += strlen(str) + 1; 1442 if (str - strbase >= strsize) 1443 str = NULL; 1444 } 1445 } 1446 1447 if (P != NULL) { 1448 dt_proc_unlock(dtp, P); 1449 dt_proc_release(dtp, P); 1450 } 1451 1452 return (err); 1453 } 1454 1455 static int 1456 dt_print_usym(dtrace_hdl_t *dtp, FILE *fp, caddr_t addr, dtrace_actkind_t act) 1457 { 1458 /* LINTED - alignment */ 1459 uint64_t pid = ((uint64_t *)addr)[0]; 1460 /* LINTED - alignment */ 1461 uint64_t pc = ((uint64_t *)addr)[1]; 1462 const char *format = " %-50s"; 1463 char *s; 1464 int n, len = 256; 1465 1466 if (act == DTRACEACT_USYM && dtp->dt_vector == NULL) { 1467 struct ps_prochandle *P; 1468 1469 if ((P = dt_proc_grab(dtp, pid, 1470 PGRAB_RDONLY | PGRAB_FORCE, 0)) != NULL) { 1471 GElf_Sym sym; 1472 1473 dt_proc_lock(dtp, P); 1474 1475 if (Plookup_by_addr(P, pc, NULL, 0, &sym) == 0) 1476 pc = sym.st_value; 1477 1478 dt_proc_unlock(dtp, P); 1479 dt_proc_release(dtp, P); 1480 } 1481 } 1482 1483 do { 1484 n = len; 1485 s = alloca(n); 1486 } while ((len = dtrace_uaddr2str(dtp, pid, pc, s, n)) > n); 1487 1488 return (dt_printf(dtp, fp, format, s)); 1489 } 1490 1491 int 1492 dt_print_umod(dtrace_hdl_t *dtp, FILE *fp, const char *format, caddr_t addr) 1493 { 1494 /* LINTED - alignment */ 1495 uint64_t pid = ((uint64_t *)addr)[0]; 1496 /* LINTED - alignment */ 1497 uint64_t pc = ((uint64_t *)addr)[1]; 1498 int err = 0; 1499 1500 char objname[PATH_MAX], c[PATH_MAX * 2]; 1501 struct ps_prochandle *P; 1502 1503 if (format == NULL) 1504 format = " %-50s"; 1505 1506 /* 1507 * See the comment in dt_print_ustack() for the rationale for 1508 * printing raw addresses in the vectored case. 1509 */ 1510 if (dtp->dt_vector == NULL) 1511 P = dt_proc_grab(dtp, pid, PGRAB_RDONLY | PGRAB_FORCE, 0); 1512 else 1513 P = NULL; 1514 1515 if (P != NULL) 1516 dt_proc_lock(dtp, P); /* lock handle while we perform lookups */ 1517 1518 if (P != NULL && Pobjname(P, pc, objname, sizeof (objname)) != NULL) { 1519 (void) snprintf(c, sizeof (c), "%s", dt_basename(objname)); 1520 } else { 1521 (void) snprintf(c, sizeof (c), "0x%llx", (u_longlong_t)pc); 1522 } 1523 1524 err = dt_printf(dtp, fp, format, c); 1525 1526 if (P != NULL) { 1527 dt_proc_unlock(dtp, P); 1528 dt_proc_release(dtp, P); 1529 } 1530 1531 return (err); 1532 } 1533 1534 static int 1535 dt_print_sym(dtrace_hdl_t *dtp, FILE *fp, const char *format, caddr_t addr) 1536 { 1537 /* LINTED - alignment */ 1538 uint64_t pc = *((uint64_t *)addr); 1539 dtrace_syminfo_t dts; 1540 GElf_Sym sym; 1541 char c[PATH_MAX * 2]; 1542 1543 if (format == NULL) 1544 format = " %-50s"; 1545 1546 if (dtrace_lookup_by_addr(dtp, pc, &sym, &dts) == 0) { 1547 (void) snprintf(c, sizeof (c), "%s`%s", 1548 dts.dts_object, dts.dts_name); 1549 } else { 1550 /* 1551 * We'll repeat the lookup, but this time we'll specify a 1552 * NULL GElf_Sym -- indicating that we're only interested in 1553 * the containing module. 
1554 */ 1555 if (dtrace_lookup_by_addr(dtp, pc, NULL, &dts) == 0) { 1556 (void) snprintf(c, sizeof (c), "%s`0x%llx", 1557 dts.dts_object, (u_longlong_t)pc); 1558 } else { 1559 (void) snprintf(c, sizeof (c), "0x%llx", 1560 (u_longlong_t)pc); 1561 } 1562 } 1563 1564 if (dt_printf(dtp, fp, format, c) < 0) 1565 return (-1); 1566 1567 return (0); 1568 } 1569 1570 int 1571 dt_print_mod(dtrace_hdl_t *dtp, FILE *fp, const char *format, caddr_t addr) 1572 { 1573 /* LINTED - alignment */ 1574 uint64_t pc = *((uint64_t *)addr); 1575 dtrace_syminfo_t dts; 1576 char c[PATH_MAX * 2]; 1577 1578 if (format == NULL) 1579 format = " %-50s"; 1580 1581 if (dtrace_lookup_by_addr(dtp, pc, NULL, &dts) == 0) { 1582 (void) snprintf(c, sizeof (c), "%s", dts.dts_object); 1583 } else { 1584 (void) snprintf(c, sizeof (c), "0x%llx", (u_longlong_t)pc); 1585 } 1586 1587 if (dt_printf(dtp, fp, format, c) < 0) 1588 return (-1); 1589 1590 return (0); 1591 } 1592 1593 typedef struct dt_normal { 1594 dtrace_aggvarid_t dtnd_id; 1595 uint64_t dtnd_normal; 1596 } dt_normal_t; 1597 1598 static int 1599 dt_normalize_agg(const dtrace_aggdata_t *aggdata, void *arg) 1600 { 1601 dt_normal_t *normal = arg; 1602 dtrace_aggdesc_t *agg = aggdata->dtada_desc; 1603 dtrace_aggvarid_t id = normal->dtnd_id; 1604 1605 if (agg->dtagd_nrecs == 0) 1606 return (DTRACE_AGGWALK_NEXT); 1607 1608 if (agg->dtagd_varid != id) 1609 return (DTRACE_AGGWALK_NEXT); 1610 1611 ((dtrace_aggdata_t *)aggdata)->dtada_normal = normal->dtnd_normal; 1612 return (DTRACE_AGGWALK_NORMALIZE); 1613 } 1614 1615 static int 1616 dt_normalize(dtrace_hdl_t *dtp, caddr_t base, dtrace_recdesc_t *rec) 1617 { 1618 dt_normal_t normal; 1619 caddr_t addr; 1620 1621 /* 1622 * We (should) have two records: the aggregation ID followed by the 1623 * normalization value. 
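         *
         * For example, normalize(@bytes, 1024) arrives as a record holding
         * the aggregation variable ID of @bytes, followed by a
         * DTRACEACT_LIBACT record (with argument DT_ACT_NORMALIZE) whose
         * payload is 1024.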
1624 */ 1625 addr = base + rec->dtrd_offset; 1626 1627 if (rec->dtrd_size != sizeof (dtrace_aggvarid_t)) 1628 return (dt_set_errno(dtp, EDT_BADNORMAL)); 1629 1630 /* LINTED - alignment */ 1631 normal.dtnd_id = *((dtrace_aggvarid_t *)addr); 1632 rec++; 1633 1634 if (rec->dtrd_action != DTRACEACT_LIBACT) 1635 return (dt_set_errno(dtp, EDT_BADNORMAL)); 1636 1637 if (rec->dtrd_arg != DT_ACT_NORMALIZE) 1638 return (dt_set_errno(dtp, EDT_BADNORMAL)); 1639 1640 addr = base + rec->dtrd_offset; 1641 1642 switch (rec->dtrd_size) { 1643 case sizeof (uint64_t): 1644 /* LINTED - alignment */ 1645 normal.dtnd_normal = *((uint64_t *)addr); 1646 break; 1647 case sizeof (uint32_t): 1648 /* LINTED - alignment */ 1649 normal.dtnd_normal = *((uint32_t *)addr); 1650 break; 1651 case sizeof (uint16_t): 1652 /* LINTED - alignment */ 1653 normal.dtnd_normal = *((uint16_t *)addr); 1654 break; 1655 case sizeof (uint8_t): 1656 normal.dtnd_normal = *((uint8_t *)addr); 1657 break; 1658 default: 1659 return (dt_set_errno(dtp, EDT_BADNORMAL)); 1660 } 1661 1662 (void) dtrace_aggregate_walk(dtp, dt_normalize_agg, &normal); 1663 1664 return (0); 1665 } 1666 1667 static int 1668 dt_denormalize_agg(const dtrace_aggdata_t *aggdata, void *arg) 1669 { 1670 dtrace_aggdesc_t *agg = aggdata->dtada_desc; 1671 dtrace_aggvarid_t id = *((dtrace_aggvarid_t *)arg); 1672 1673 if (agg->dtagd_nrecs == 0) 1674 return (DTRACE_AGGWALK_NEXT); 1675 1676 if (agg->dtagd_varid != id) 1677 return (DTRACE_AGGWALK_NEXT); 1678 1679 return (DTRACE_AGGWALK_DENORMALIZE); 1680 } 1681 1682 static int 1683 dt_clear_agg(const dtrace_aggdata_t *aggdata, void *arg) 1684 { 1685 dtrace_aggdesc_t *agg = aggdata->dtada_desc; 1686 dtrace_aggvarid_t id = *((dtrace_aggvarid_t *)arg); 1687 1688 if (agg->dtagd_nrecs == 0) 1689 return (DTRACE_AGGWALK_NEXT); 1690 1691 if (agg->dtagd_varid != id) 1692 return (DTRACE_AGGWALK_NEXT); 1693 1694 return (DTRACE_AGGWALK_CLEAR); 1695 } 1696 1697 typedef struct dt_trunc { 1698 dtrace_aggvarid_t dttd_id; 1699 uint64_t dttd_remaining; 1700 } dt_trunc_t; 1701 1702 static int 1703 dt_trunc_agg(const dtrace_aggdata_t *aggdata, void *arg) 1704 { 1705 dt_trunc_t *trunc = arg; 1706 dtrace_aggdesc_t *agg = aggdata->dtada_desc; 1707 dtrace_aggvarid_t id = trunc->dttd_id; 1708 1709 if (agg->dtagd_nrecs == 0) 1710 return (DTRACE_AGGWALK_NEXT); 1711 1712 if (agg->dtagd_varid != id) 1713 return (DTRACE_AGGWALK_NEXT); 1714 1715 if (trunc->dttd_remaining == 0) 1716 return (DTRACE_AGGWALK_REMOVE); 1717 1718 trunc->dttd_remaining--; 1719 return (DTRACE_AGGWALK_NEXT); 1720 } 1721 1722 static int 1723 dt_trunc(dtrace_hdl_t *dtp, caddr_t base, dtrace_recdesc_t *rec) 1724 { 1725 dt_trunc_t trunc; 1726 caddr_t addr; 1727 int64_t remaining; 1728 int (*func)(dtrace_hdl_t *, dtrace_aggregate_f *, void *); 1729 1730 /* 1731 * We (should) have two records: the aggregation ID followed by the 1732 * number of aggregation entries after which the aggregation is to be 1733 * truncated. 
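         *
         * For example, trunc(@a, 10) arrives with a count of 10 and keeps
         * only the ten highest-valued entries; trunc(@a, -10) arrives with
         * -10 and keeps the ten lowest-valued entries instead, which is why
         * the walk direction below is chosen based on the sign of the count.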
 */
        addr = base + rec->dtrd_offset;

        if (rec->dtrd_size != sizeof (dtrace_aggvarid_t))
                return (dt_set_errno(dtp, EDT_BADTRUNC));

        /* LINTED - alignment */
        trunc.dttd_id = *((dtrace_aggvarid_t *)addr);
        rec++;

        if (rec->dtrd_action != DTRACEACT_LIBACT)
                return (dt_set_errno(dtp, EDT_BADTRUNC));

        if (rec->dtrd_arg != DT_ACT_TRUNC)
                return (dt_set_errno(dtp, EDT_BADTRUNC));

        addr = base + rec->dtrd_offset;

        switch (rec->dtrd_size) {
        case sizeof (uint64_t):
                /* LINTED - alignment */
                remaining = *((int64_t *)addr);
                break;
        case sizeof (uint32_t):
                /* LINTED - alignment */
                remaining = *((int32_t *)addr);
                break;
        case sizeof (uint16_t):
                /* LINTED - alignment */
                remaining = *((int16_t *)addr);
                break;
        case sizeof (uint8_t):
                remaining = *((int8_t *)addr);
                break;
        default:
                return (dt_set_errno(dtp, EDT_BADTRUNC));
        }

        if (remaining < 0) {
                func = dtrace_aggregate_walk_valsorted;
                remaining = -remaining;
        } else {
                func = dtrace_aggregate_walk_valrevsorted;
        }

        assert(remaining >= 0);
        trunc.dttd_remaining = remaining;

        (void) func(dtp, dt_trunc_agg, &trunc);

        return (0);
}

static int
dt_print_datum(dtrace_hdl_t *dtp, FILE *fp, dtrace_recdesc_t *rec,
    caddr_t addr, size_t size, const dtrace_aggdata_t *aggdata,
    uint64_t normal, dt_print_aggdata_t *pd)
{
        int err, width;
        dtrace_actkind_t act = rec->dtrd_action;
        boolean_t packed = pd->dtpa_agghist || pd->dtpa_aggpack;
        dtrace_aggdesc_t *agg = aggdata->dtada_desc;

        static struct {
                size_t size;
                int width;
                int packedwidth;
        } *fmt, fmttab[] = {
                { sizeof (uint8_t), 3, 3 },
                { sizeof (uint16_t), 5, 5 },
                { sizeof (uint32_t), 8, 8 },
                { sizeof (uint64_t), 16, 16 },
                { 0, -50, 16 }
        };

        if (packed && pd->dtpa_agghisthdr != agg->dtagd_varid) {
                dtrace_recdesc_t *r;

                width = 0;

                /*
                 * To print our quantization header for either an agghist or
                 * aggpack aggregation, we need to iterate through all of our
                 * records to determine their width.
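                 *
                 * For example, a key consisting of a 32-bit pid and a string
                 * (say, an execname) contributes (8 + 1) + (16 + 1)
                 * characters to the header width, per the packed widths in
                 * fmttab above.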
1818 */ 1819 for (r = rec; !DTRACEACT_ISAGG(r->dtrd_action); r++) { 1820 for (fmt = fmttab; fmt->size && 1821 fmt->size != r->dtrd_size; fmt++) 1822 continue; 1823 1824 width += fmt->packedwidth + 1; 1825 } 1826 1827 if (pd->dtpa_agghist) { 1828 if (dt_print_quanthdr(dtp, fp, width) < 0) 1829 return (-1); 1830 } else { 1831 if (dt_print_quanthdr_packed(dtp, fp, 1832 width, aggdata, r->dtrd_action) < 0) 1833 return (-1); 1834 } 1835 1836 pd->dtpa_agghisthdr = agg->dtagd_varid; 1837 } 1838 1839 if (pd->dtpa_agghist && DTRACEACT_ISAGG(act)) { 1840 char positives = aggdata->dtada_flags & DTRACE_A_HASPOSITIVES; 1841 char negatives = aggdata->dtada_flags & DTRACE_A_HASNEGATIVES; 1842 int64_t val; 1843 1844 assert(act == DTRACEAGG_SUM || act == DTRACEAGG_COUNT); 1845 val = (long long)*((uint64_t *)addr); 1846 1847 if (dt_printf(dtp, fp, " ") < 0) 1848 return (-1); 1849 1850 return (dt_print_quantline(dtp, fp, val, normal, 1851 aggdata->dtada_total, positives, negatives)); 1852 } 1853 1854 if (pd->dtpa_aggpack && DTRACEACT_ISAGG(act)) { 1855 switch (act) { 1856 case DTRACEAGG_QUANTIZE: 1857 return (dt_print_quantize_packed(dtp, 1858 fp, addr, size, aggdata)); 1859 case DTRACEAGG_LQUANTIZE: 1860 return (dt_print_lquantize_packed(dtp, 1861 fp, addr, size, aggdata)); 1862 default: 1863 break; 1864 } 1865 } 1866 1867 switch (act) { 1868 case DTRACEACT_STACK: 1869 return (dt_print_stack(dtp, fp, NULL, addr, 1870 rec->dtrd_arg, rec->dtrd_size / rec->dtrd_arg)); 1871 1872 case DTRACEACT_USTACK: 1873 case DTRACEACT_JSTACK: 1874 return (dt_print_ustack(dtp, fp, NULL, addr, rec->dtrd_arg)); 1875 1876 case DTRACEACT_USYM: 1877 case DTRACEACT_UADDR: 1878 return (dt_print_usym(dtp, fp, addr, act)); 1879 1880 case DTRACEACT_UMOD: 1881 return (dt_print_umod(dtp, fp, NULL, addr)); 1882 1883 case DTRACEACT_SYM: 1884 return (dt_print_sym(dtp, fp, NULL, addr)); 1885 1886 case DTRACEACT_MOD: 1887 return (dt_print_mod(dtp, fp, NULL, addr)); 1888 1889 case DTRACEAGG_QUANTIZE: 1890 return (dt_print_quantize(dtp, fp, addr, size, normal)); 1891 1892 case DTRACEAGG_LQUANTIZE: 1893 return (dt_print_lquantize(dtp, fp, addr, size, normal)); 1894 1895 case DTRACEAGG_LLQUANTIZE: 1896 return (dt_print_llquantize(dtp, fp, addr, size, normal)); 1897 1898 case DTRACEAGG_AVG: 1899 return (dt_print_average(dtp, fp, addr, size, normal)); 1900 1901 case DTRACEAGG_STDDEV: 1902 return (dt_print_stddev(dtp, fp, addr, size, normal)); 1903 1904 default: 1905 break; 1906 } 1907 1908 for (fmt = fmttab; fmt->size && fmt->size != size; fmt++) 1909 continue; 1910 1911 width = packed ? 
fmt->packedwidth : fmt->width; 1912 1913 switch (size) { 1914 case sizeof (uint64_t): 1915 err = dt_printf(dtp, fp, " %*lld", width, 1916 /* LINTED - alignment */ 1917 (long long)*((uint64_t *)addr) / normal); 1918 break; 1919 case sizeof (uint32_t): 1920 /* LINTED - alignment */ 1921 err = dt_printf(dtp, fp, " %*d", width, *((uint32_t *)addr) / 1922 (uint32_t)normal); 1923 break; 1924 case sizeof (uint16_t): 1925 /* LINTED - alignment */ 1926 err = dt_printf(dtp, fp, " %*d", width, *((uint16_t *)addr) / 1927 (uint32_t)normal); 1928 break; 1929 case sizeof (uint8_t): 1930 err = dt_printf(dtp, fp, " %*d", width, *((uint8_t *)addr) / 1931 (uint32_t)normal); 1932 break; 1933 default: 1934 err = dt_print_bytes(dtp, fp, addr, size, width, 0, 0); 1935 break; 1936 } 1937 1938 return (err); 1939 } 1940 1941 int 1942 dt_print_aggs(const dtrace_aggdata_t **aggsdata, int naggvars, void *arg) 1943 { 1944 int i, aggact = 0; 1945 dt_print_aggdata_t *pd = arg; 1946 const dtrace_aggdata_t *aggdata = aggsdata[0]; 1947 dtrace_aggdesc_t *agg = aggdata->dtada_desc; 1948 FILE *fp = pd->dtpa_fp; 1949 dtrace_hdl_t *dtp = pd->dtpa_dtp; 1950 dtrace_recdesc_t *rec; 1951 dtrace_actkind_t act; 1952 caddr_t addr; 1953 size_t size; 1954 1955 pd->dtpa_agghist = (aggdata->dtada_flags & DTRACE_A_TOTAL); 1956 pd->dtpa_aggpack = (aggdata->dtada_flags & DTRACE_A_MINMAXBIN); 1957 1958 /* 1959 * Iterate over each record description in the key, printing the traced 1960 * data, skipping the first datum (the tuple member created by the 1961 * compiler). 1962 */ 1963 for (i = 1; i < agg->dtagd_nrecs; i++) { 1964 rec = &agg->dtagd_rec[i]; 1965 act = rec->dtrd_action; 1966 addr = aggdata->dtada_data + rec->dtrd_offset; 1967 size = rec->dtrd_size; 1968 1969 if (DTRACEACT_ISAGG(act)) { 1970 aggact = i; 1971 break; 1972 } 1973 1974 if (dt_print_datum(dtp, fp, rec, addr, 1975 size, aggdata, 1, pd) < 0) 1976 return (-1); 1977 1978 if (dt_buffered_flush(dtp, NULL, rec, aggdata, 1979 DTRACE_BUFDATA_AGGKEY) < 0) 1980 return (-1); 1981 } 1982 1983 assert(aggact != 0); 1984 1985 for (i = (naggvars == 1 ? 
0 : 1); i < naggvars; i++) { 1986 uint64_t normal; 1987 1988 aggdata = aggsdata[i]; 1989 agg = aggdata->dtada_desc; 1990 rec = &agg->dtagd_rec[aggact]; 1991 act = rec->dtrd_action; 1992 addr = aggdata->dtada_data + rec->dtrd_offset; 1993 size = rec->dtrd_size; 1994 1995 assert(DTRACEACT_ISAGG(act)); 1996 normal = aggdata->dtada_normal; 1997 1998 if (dt_print_datum(dtp, fp, rec, addr, 1999 size, aggdata, normal, pd) < 0) 2000 return (-1); 2001 2002 if (dt_buffered_flush(dtp, NULL, rec, aggdata, 2003 DTRACE_BUFDATA_AGGVAL) < 0) 2004 return (-1); 2005 2006 if (!pd->dtpa_allunprint) 2007 agg->dtagd_flags |= DTRACE_AGD_PRINTED; 2008 } 2009 2010 if (!pd->dtpa_agghist && !pd->dtpa_aggpack) { 2011 if (dt_printf(dtp, fp, "\n") < 0) 2012 return (-1); 2013 } 2014 2015 if (dt_buffered_flush(dtp, NULL, NULL, aggdata, 2016 DTRACE_BUFDATA_AGGFORMAT | DTRACE_BUFDATA_AGGLAST) < 0) 2017 return (-1); 2018 2019 return (0); 2020 } 2021 2022 int 2023 dt_print_agg(const dtrace_aggdata_t *aggdata, void *arg) 2024 { 2025 dt_print_aggdata_t *pd = arg; 2026 dtrace_aggdesc_t *agg = aggdata->dtada_desc; 2027 dtrace_aggvarid_t aggvarid = pd->dtpa_id; 2028 2029 if (pd->dtpa_allunprint) { 2030 if (agg->dtagd_flags & DTRACE_AGD_PRINTED) 2031 return (0); 2032 } else { 2033 /* 2034 * If we're not printing all unprinted aggregations, then the 2035 * aggregation variable ID denotes a specific aggregation 2036 * variable that we should print -- skip any other aggregations 2037 * that we encounter. 2038 */ 2039 if (agg->dtagd_nrecs == 0) 2040 return (0); 2041 2042 if (aggvarid != agg->dtagd_varid) 2043 return (0); 2044 } 2045 2046 return (dt_print_aggs(&aggdata, 1, arg)); 2047 } 2048 2049 int 2050 dt_setopt(dtrace_hdl_t *dtp, const dtrace_probedata_t *data, 2051 const char *option, const char *value) 2052 { 2053 int len, rval; 2054 char *msg; 2055 const char *errstr; 2056 dtrace_setoptdata_t optdata; 2057 2058 bzero(&optdata, sizeof (optdata)); 2059 (void) dtrace_getopt(dtp, option, &optdata.dtsda_oldval); 2060 2061 if (dtrace_setopt(dtp, option, value) == 0) { 2062 (void) dtrace_getopt(dtp, option, &optdata.dtsda_newval); 2063 optdata.dtsda_probe = data; 2064 optdata.dtsda_option = option; 2065 optdata.dtsda_handle = dtp; 2066 2067 if ((rval = dt_handle_setopt(dtp, &optdata)) != 0) 2068 return (rval); 2069 2070 return (0); 2071 } 2072 2073 errstr = dtrace_errmsg(dtp, dtrace_errno(dtp)); 2074 len = strlen(option) + strlen(value) + strlen(errstr) + 80; 2075 msg = alloca(len); 2076 2077 (void) snprintf(msg, len, "couldn't set option \"%s\" to \"%s\": %s\n", 2078 option, value, errstr); 2079 2080 if ((rval = dt_handle_liberr(dtp, data, msg)) == 0) 2081 return (0); 2082 2083 return (rval); 2084 } 2085 2086 static int 2087 dt_consume_cpu(dtrace_hdl_t *dtp, FILE *fp, int cpu, 2088 dtrace_bufdesc_t *buf, boolean_t just_one, 2089 dtrace_consume_probe_f *efunc, dtrace_consume_rec_f *rfunc, void *arg) 2090 { 2091 dtrace_epid_t id; 2092 size_t offs; 2093 int flow = (dtp->dt_options[DTRACEOPT_FLOWINDENT] != DTRACEOPT_UNSET); 2094 int quiet = (dtp->dt_options[DTRACEOPT_QUIET] != DTRACEOPT_UNSET); 2095 int rval, i, n; 2096 uint64_t tracememsize = 0; 2097 dtrace_probedata_t data; 2098 uint64_t drops; 2099 2100 bzero(&data, sizeof (data)); 2101 data.dtpda_handle = dtp; 2102 data.dtpda_cpu = cpu; 2103 data.dtpda_flow = dtp->dt_flow; 2104 data.dtpda_indent = dtp->dt_indent; 2105 data.dtpda_prefix = dtp->dt_prefix; 2106 2107 for (offs = buf->dtbd_oldest; offs < buf->dtbd_size; ) { 2108 dtrace_eprobedesc_t *epd; 2109 2110 /* 2111 * We're 
guaranteed to have an ID. 2112 */ 2113 id = *(uint32_t *)((uintptr_t)buf->dtbd_data + offs); 2114 2115 if (id == DTRACE_EPIDNONE) { 2116 /* 2117 * This is filler to assure proper alignment of the 2118 * next record; we simply ignore it. 2119 */ 2120 offs += sizeof (id); 2121 continue; 2122 } 2123 2124 if ((rval = dt_epid_lookup(dtp, id, &data.dtpda_edesc, 2125 &data.dtpda_pdesc)) != 0) 2126 return (rval); 2127 2128 epd = data.dtpda_edesc; 2129 data.dtpda_data = buf->dtbd_data + offs; 2130 2131 if (data.dtpda_edesc->dtepd_uarg != DT_ECB_DEFAULT) { 2132 rval = dt_handle(dtp, &data); 2133 2134 if (rval == DTRACE_CONSUME_NEXT) 2135 goto nextepid; 2136 2137 if (rval == DTRACE_CONSUME_ERROR) 2138 return (-1); 2139 } 2140 2141 if (flow) 2142 (void) dt_flowindent(dtp, &data, dtp->dt_last_epid, 2143 buf, offs); 2144 2145 rval = (*efunc)(&data, arg); 2146 2147 if (flow) { 2148 if (data.dtpda_flow == DTRACEFLOW_ENTRY) 2149 data.dtpda_indent += 2; 2150 } 2151 2152 if (rval == DTRACE_CONSUME_NEXT) 2153 goto nextepid; 2154 2155 if (rval == DTRACE_CONSUME_ABORT) 2156 return (dt_set_errno(dtp, EDT_DIRABORT)); 2157 2158 if (rval != DTRACE_CONSUME_THIS) 2159 return (dt_set_errno(dtp, EDT_BADRVAL)); 2160 2161 for (i = 0; i < epd->dtepd_nrecs; i++) { 2162 caddr_t addr; 2163 dtrace_recdesc_t *rec = &epd->dtepd_rec[i]; 2164 dtrace_actkind_t act = rec->dtrd_action; 2165 2166 data.dtpda_data = buf->dtbd_data + offs + 2167 rec->dtrd_offset; 2168 addr = data.dtpda_data; 2169 2170 if (act == DTRACEACT_LIBACT) { 2171 uint64_t arg = rec->dtrd_arg; 2172 dtrace_aggvarid_t id; 2173 2174 switch (arg) { 2175 case DT_ACT_CLEAR: 2176 /* LINTED - alignment */ 2177 id = *((dtrace_aggvarid_t *)addr); 2178 (void) dtrace_aggregate_walk(dtp, 2179 dt_clear_agg, &id); 2180 continue; 2181 2182 case DT_ACT_DENORMALIZE: 2183 /* LINTED - alignment */ 2184 id = *((dtrace_aggvarid_t *)addr); 2185 (void) dtrace_aggregate_walk(dtp, 2186 dt_denormalize_agg, &id); 2187 continue; 2188 2189 case DT_ACT_FTRUNCATE: 2190 if (fp == NULL) 2191 continue; 2192 2193 (void) fflush(fp); 2194 (void) ftruncate(fileno(fp), 0); 2195 (void) fseeko(fp, 0, SEEK_SET); 2196 continue; 2197 2198 case DT_ACT_NORMALIZE: 2199 if (i == epd->dtepd_nrecs - 1) 2200 return (dt_set_errno(dtp, 2201 EDT_BADNORMAL)); 2202 2203 if (dt_normalize(dtp, 2204 buf->dtbd_data + offs, rec) != 0) 2205 return (-1); 2206 2207 i++; 2208 continue; 2209 2210 case DT_ACT_SETOPT: { 2211 uint64_t *opts = dtp->dt_options; 2212 dtrace_recdesc_t *valrec; 2213 uint32_t valsize; 2214 caddr_t val; 2215 int rv; 2216 2217 if (i == epd->dtepd_nrecs - 1) { 2218 return (dt_set_errno(dtp, 2219 EDT_BADSETOPT)); 2220 } 2221 2222 valrec = &epd->dtepd_rec[++i]; 2223 valsize = valrec->dtrd_size; 2224 2225 if (valrec->dtrd_action != act || 2226 valrec->dtrd_arg != arg) { 2227 return (dt_set_errno(dtp, 2228 EDT_BADSETOPT)); 2229 } 2230 2231 if (valsize > sizeof (uint64_t)) { 2232 val = buf->dtbd_data + offs + 2233 valrec->dtrd_offset; 2234 } else { 2235 val = "1"; 2236 } 2237 2238 rv = dt_setopt(dtp, &data, addr, val); 2239 2240 if (rv != 0) 2241 return (-1); 2242 2243 flow = (opts[DTRACEOPT_FLOWINDENT] != 2244 DTRACEOPT_UNSET); 2245 quiet = (opts[DTRACEOPT_QUIET] != 2246 DTRACEOPT_UNSET); 2247 2248 continue; 2249 } 2250 2251 case DT_ACT_TRUNC: 2252 if (i == epd->dtepd_nrecs - 1) 2253 return (dt_set_errno(dtp, 2254 EDT_BADTRUNC)); 2255 2256 if (dt_trunc(dtp, 2257 buf->dtbd_data + offs, rec) != 0) 2258 return (-1); 2259 2260 i++; 2261 continue; 2262 2263 default: 2264 continue; 2265 } 2266 } 2267 2268 if (act == 
DTRACEACT_TRACEMEM_DYNSIZE && 2269 rec->dtrd_size == sizeof (uint64_t)) { 2270 /* LINTED - alignment */ 2271 tracememsize = *((unsigned long long *)addr); 2272 continue; 2273 } 2274 2275 rval = (*rfunc)(&data, rec, arg); 2276 2277 if (rval == DTRACE_CONSUME_NEXT) 2278 continue; 2279 2280 if (rval == DTRACE_CONSUME_ABORT) 2281 return (dt_set_errno(dtp, EDT_DIRABORT)); 2282 2283 if (rval != DTRACE_CONSUME_THIS) 2284 return (dt_set_errno(dtp, EDT_BADRVAL)); 2285 2286 if (act == DTRACEACT_STACK) { 2287 int depth = rec->dtrd_arg; 2288 2289 if (dt_print_stack(dtp, fp, NULL, addr, depth, 2290 rec->dtrd_size / depth) < 0) 2291 return (-1); 2292 goto nextrec; 2293 } 2294 2295 if (act == DTRACEACT_USTACK || 2296 act == DTRACEACT_JSTACK) { 2297 if (dt_print_ustack(dtp, fp, NULL, 2298 addr, rec->dtrd_arg) < 0) 2299 return (-1); 2300 goto nextrec; 2301 } 2302 2303 if (act == DTRACEACT_SYM) { 2304 if (dt_print_sym(dtp, fp, NULL, addr) < 0) 2305 return (-1); 2306 goto nextrec; 2307 } 2308 2309 if (act == DTRACEACT_MOD) { 2310 if (dt_print_mod(dtp, fp, NULL, addr) < 0) 2311 return (-1); 2312 goto nextrec; 2313 } 2314 2315 if (act == DTRACEACT_USYM || act == DTRACEACT_UADDR) { 2316 if (dt_print_usym(dtp, fp, addr, act) < 0) 2317 return (-1); 2318 goto nextrec; 2319 } 2320 2321 if (act == DTRACEACT_UMOD) { 2322 if (dt_print_umod(dtp, fp, NULL, addr) < 0) 2323 return (-1); 2324 goto nextrec; 2325 } 2326 2327 if (DTRACEACT_ISPRINTFLIKE(act)) { 2328 void *fmtdata; 2329 int (*func)(dtrace_hdl_t *, FILE *, void *, 2330 const dtrace_probedata_t *, 2331 const dtrace_recdesc_t *, uint_t, 2332 const void *buf, size_t); 2333 2334 if ((fmtdata = dt_format_lookup(dtp, 2335 rec->dtrd_format)) == NULL) 2336 goto nofmt; 2337 2338 switch (act) { 2339 case DTRACEACT_PRINTF: 2340 func = dtrace_fprintf; 2341 break; 2342 case DTRACEACT_PRINTA: 2343 func = dtrace_fprinta; 2344 break; 2345 case DTRACEACT_SYSTEM: 2346 func = dtrace_system; 2347 break; 2348 case DTRACEACT_FREOPEN: 2349 func = dtrace_freopen; 2350 break; 2351 } 2352 2353 n = (*func)(dtp, fp, fmtdata, &data, 2354 rec, epd->dtepd_nrecs - i, 2355 (uchar_t *)buf->dtbd_data + offs, 2356 buf->dtbd_size - offs); 2357 2358 if (n < 0) 2359 return (-1); /* errno is set for us */ 2360 2361 if (n > 0) 2362 i += n - 1; 2363 goto nextrec; 2364 } 2365 2366 /* 2367 * If this is a DIF expression, and the record has a 2368 * format set, this indicates we have a CTF type name 2369 * associated with the data and we should try to print 2370 * it out by type. 2371 */ 2372 if (act == DTRACEACT_DIFEXPR) { 2373 const char *strdata = dt_strdata_lookup(dtp, 2374 rec->dtrd_format); 2375 if (strdata != NULL) { 2376 n = dtrace_print(dtp, fp, strdata, 2377 addr, rec->dtrd_size); 2378 2379 /* 2380 * dtrace_print() will return -1 on 2381 * error, or return the number of bytes 2382 * consumed. It will return 0 if the 2383 * type couldn't be determined, and we 2384 * should fall through to the normal 2385 * trace method. 2386 */ 2387 if (n < 0) 2388 return (-1); 2389 2390 if (n > 0) 2391 goto nextrec; 2392 } 2393 } 2394 2395 nofmt: 2396 if (act == DTRACEACT_PRINTA) { 2397 dt_print_aggdata_t pd; 2398 dtrace_aggvarid_t *aggvars; 2399 int j, naggvars = 0; 2400 size_t size = ((epd->dtepd_nrecs - i) * 2401 sizeof (dtrace_aggvarid_t)); 2402 2403 if ((aggvars = dt_alloc(dtp, size)) == NULL) 2404 return (-1); 2405 2406 /* 2407 * This might be a printa() with multiple 2408 * aggregation variables. We need to scan 2409 * forward through the records until we find 2410 * a record from a different statement. 
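 *
 * As an illustration (a hypothetical example, not taken from this
 * file), a clause containing
 *
 *	printa("%@d %@d\n", @a, @b);
 *
 * emits one record per aggregation variable, and all of those records
 * share the same dtrd_uarg because they come from the same statement.
 * The scan below collects their variable IDs into aggvars[] and stops
 * at the first record whose dtrd_uarg differs, i.e. the first record
 * belonging to a different statement.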
2411 */ 2412 for (j = i; j < epd->dtepd_nrecs; j++) { 2413 dtrace_recdesc_t *nrec; 2414 caddr_t naddr; 2415 2416 nrec = &epd->dtepd_rec[j]; 2417 2418 if (nrec->dtrd_uarg != rec->dtrd_uarg) 2419 break; 2420 2421 if (nrec->dtrd_action != act) { 2422 return (dt_set_errno(dtp, 2423 EDT_BADAGG)); 2424 } 2425 2426 naddr = buf->dtbd_data + offs + 2427 nrec->dtrd_offset; 2428 2429 aggvars[naggvars++] = 2430 /* LINTED - alignment */ 2431 *((dtrace_aggvarid_t *)naddr); 2432 } 2433 2434 i = j - 1; 2435 bzero(&pd, sizeof (pd)); 2436 pd.dtpa_dtp = dtp; 2437 pd.dtpa_fp = fp; 2438 2439 assert(naggvars >= 1); 2440 2441 if (naggvars == 1) { 2442 pd.dtpa_id = aggvars[0]; 2443 dt_free(dtp, aggvars); 2444 2445 if (dt_printf(dtp, fp, "\n") < 0 || 2446 dtrace_aggregate_walk_sorted(dtp, 2447 dt_print_agg, &pd) < 0) 2448 return (-1); 2449 goto nextrec; 2450 } 2451 2452 if (dt_printf(dtp, fp, "\n") < 0 || 2453 dtrace_aggregate_walk_joined(dtp, aggvars, 2454 naggvars, dt_print_aggs, &pd) < 0) { 2455 dt_free(dtp, aggvars); 2456 return (-1); 2457 } 2458 2459 dt_free(dtp, aggvars); 2460 goto nextrec; 2461 } 2462 2463 if (act == DTRACEACT_TRACEMEM) { 2464 if (tracememsize == 0 || 2465 tracememsize > rec->dtrd_size) { 2466 tracememsize = rec->dtrd_size; 2467 } 2468 2469 n = dt_print_bytes(dtp, fp, addr, 2470 tracememsize, -33, quiet, 1); 2471 2472 tracememsize = 0; 2473 2474 if (n < 0) 2475 return (-1); 2476 2477 goto nextrec; 2478 } 2479 2480 switch (rec->dtrd_size) { 2481 case sizeof (uint64_t): 2482 n = dt_printf(dtp, fp, 2483 quiet ? "%lld" : " %16lld", 2484 /* LINTED - alignment */ 2485 *((unsigned long long *)addr)); 2486 break; 2487 case sizeof (uint32_t): 2488 n = dt_printf(dtp, fp, quiet ? "%d" : " %8d", 2489 /* LINTED - alignment */ 2490 *((uint32_t *)addr)); 2491 break; 2492 case sizeof (uint16_t): 2493 n = dt_printf(dtp, fp, quiet ? "%d" : " %5d", 2494 /* LINTED - alignment */ 2495 *((uint16_t *)addr)); 2496 break; 2497 case sizeof (uint8_t): 2498 n = dt_printf(dtp, fp, quiet ? "%d" : " %3d", 2499 *((uint8_t *)addr)); 2500 break; 2501 default: 2502 n = dt_print_bytes(dtp, fp, addr, 2503 rec->dtrd_size, -33, quiet, 0); 2504 break; 2505 } 2506 2507 if (n < 0) 2508 return (-1); /* errno is set for us */ 2509 2510 nextrec: 2511 if (dt_buffered_flush(dtp, &data, rec, NULL, 0) < 0) 2512 return (-1); /* errno is set for us */ 2513 } 2514 2515 /* 2516 * Call the record callback with a NULL record to indicate 2517 * that we're done processing this EPID. 2518 */ 2519 rval = (*rfunc)(&data, NULL, arg); 2520 nextepid: 2521 offs += epd->dtepd_size; 2522 dtp->dt_last_epid = id; 2523 if (just_one) { 2524 buf->dtbd_oldest = offs; 2525 break; 2526 } 2527 } 2528 2529 dtp->dt_flow = data.dtpda_flow; 2530 dtp->dt_indent = data.dtpda_indent; 2531 dtp->dt_prefix = data.dtpda_prefix; 2532 2533 if ((drops = buf->dtbd_drops) == 0) 2534 return (0); 2535 2536 /* 2537 * Explicitly zero the drops to prevent us from processing them again. 2538 */ 2539 buf->dtbd_drops = 0; 2540 2541 return (dt_handle_cpudrop(dtp, cpu, DTRACEDROP_PRINCIPAL, drops)); 2542 } 2543 2544 /* 2545 * Reduce memory usage by shrinking the buffer if it's no more than half full. 2546 * Note, we need to preserve the alignment of the data at dtbd_oldest, which is 2547 * only 4-byte aligned. 
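 *
 * A worked example with hypothetical numbers: if dtbd_size is 65536 and
 * dtbd_oldest is 49156, only 16380 bytes are live (less than half), so
 * we allocate 16380 + 4 bytes (misalign = 49156 & 7 = 4), zero the
 * first 4 bytes, and copy the live data to offset 4.  The new
 * dtbd_oldest of 4 has the same alignment modulo 8 as the old offset of
 * 49156, so each record lands at the same alignment it had before.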
2548 */ 2549 static void 2550 dt_realloc_buf(dtrace_hdl_t *dtp, dtrace_bufdesc_t *buf, int cursize) 2551 { 2552 uint64_t used = buf->dtbd_size - buf->dtbd_oldest; 2553 if (used < cursize / 2) { 2554 int misalign = buf->dtbd_oldest & (sizeof (uint64_t) - 1); 2555 char *newdata = dt_alloc(dtp, used + misalign); 2556 if (newdata == NULL) 2557 return; 2558 bzero(newdata, misalign); 2559 bcopy(buf->dtbd_data + buf->dtbd_oldest, 2560 newdata + misalign, used); 2561 dt_free(dtp, buf->dtbd_data); 2562 buf->dtbd_oldest = misalign; 2563 buf->dtbd_size = used + misalign; 2564 buf->dtbd_data = newdata; 2565 } 2566 } 2567 2568 /* 2569 * If the ring buffer has wrapped, the data is not in order. Rearrange it 2570 * so that it is. Note, we need to preserve the alignment of the data at 2571 * dtbd_oldest, which is only 4-byte aligned. 2572 */ 2573 static int 2574 dt_unring_buf(dtrace_hdl_t *dtp, dtrace_bufdesc_t *buf) 2575 { 2576 int misalign; 2577 char *newdata, *ndp; 2578 2579 if (buf->dtbd_oldest == 0) 2580 return (0); 2581 2582 misalign = buf->dtbd_oldest & (sizeof (uint64_t) - 1); 2583 newdata = ndp = dt_alloc(dtp, buf->dtbd_size + misalign); 2584 2585 if (newdata == NULL) 2586 return (-1); 2587 2588 assert(0 == (buf->dtbd_size & (sizeof (uint64_t) - 1))); 2589 2590 bzero(ndp, misalign); 2591 ndp += misalign; 2592 2593 bcopy(buf->dtbd_data + buf->dtbd_oldest, ndp, 2594 buf->dtbd_size - buf->dtbd_oldest); 2595 ndp += buf->dtbd_size - buf->dtbd_oldest; 2596 2597 bcopy(buf->dtbd_data, ndp, buf->dtbd_oldest); 2598 2599 dt_free(dtp, buf->dtbd_data); 2600 buf->dtbd_oldest = 0; 2601 buf->dtbd_data = newdata; 2602 buf->dtbd_size += misalign; 2603 2604 return (0); 2605 } 2606 2607 static void 2608 dt_put_buf(dtrace_hdl_t *dtp, dtrace_bufdesc_t *buf) 2609 { 2610 dt_free(dtp, buf->dtbd_data); 2611 dt_free(dtp, buf); 2612 } 2613 2614 /* 2615 * Returns 0 on success, in which case *cbp will be filled in if we retrieved 2616 * data, or NULL if there is no data for this CPU. 2617 * Returns -1 on failure and sets dt_errno. 2618 */ 2619 static int 2620 dt_get_buf(dtrace_hdl_t *dtp, int cpu, dtrace_bufdesc_t **bufp) 2621 { 2622 dtrace_optval_t size; 2623 dtrace_bufdesc_t *buf = dt_zalloc(dtp, sizeof (*buf)); 2624 int error; 2625 2626 if (buf == NULL) 2627 return (-1); 2628 2629 (void) dtrace_getopt(dtp, "bufsize", &size); 2630 buf->dtbd_data = dt_alloc(dtp, size); 2631 if (buf->dtbd_data == NULL) { 2632 dt_free(dtp, buf); 2633 return (-1); 2634 } 2635 buf->dtbd_size = size; 2636 buf->dtbd_cpu = cpu; 2637 2638 if (dt_ioctl(dtp, DTRACEIOC_BUFSNAP, buf) == -1) { 2639 dt_put_buf(dtp, buf); 2640 /* 2641 * If we failed with ENOENT, it may be because the 2642 * CPU was unconfigured -- this is okay. Any other 2643 * error, however, is unexpected. 
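 * (For instance, because the consumer iterates over every possible CPU
 * ID up to _SC_CPUID_MAX, a snapshot may be requested for an ID that
 * has no configured CPU behind it; that request fails with ENOENT and
 * we simply report that there is no data for it.)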
2644 */ 2645 if (errno == ENOENT) { 2646 *bufp = NULL; 2647 return (0); 2648 } 2649 2650 return (dt_set_errno(dtp, errno)); 2651 } 2652 2653 error = dt_unring_buf(dtp, buf); 2654 if (error != 0) { 2655 dt_put_buf(dtp, buf); 2656 return (error); 2657 } 2658 dt_realloc_buf(dtp, buf, size); 2659 2660 *bufp = buf; 2661 return (0); 2662 } 2663 2664 typedef struct dt_begin { 2665 dtrace_consume_probe_f *dtbgn_probefunc; 2666 dtrace_consume_rec_f *dtbgn_recfunc; 2667 void *dtbgn_arg; 2668 dtrace_handle_err_f *dtbgn_errhdlr; 2669 void *dtbgn_errarg; 2670 int dtbgn_beginonly; 2671 } dt_begin_t; 2672 2673 static int 2674 dt_consume_begin_probe(const dtrace_probedata_t *data, void *arg) 2675 { 2676 dt_begin_t *begin = arg; 2677 dtrace_probedesc_t *pd = data->dtpda_pdesc; 2678 2679 int r1 = (strcmp(pd->dtpd_provider, "dtrace") == 0); 2680 int r2 = (strcmp(pd->dtpd_name, "BEGIN") == 0); 2681 2682 if (begin->dtbgn_beginonly) { 2683 if (!(r1 && r2)) 2684 return (DTRACE_CONSUME_NEXT); 2685 } else { 2686 if (r1 && r2) 2687 return (DTRACE_CONSUME_NEXT); 2688 } 2689 2690 /* 2691 * We have a record that we're interested in. Now call the underlying 2692 * probe function... 2693 */ 2694 return (begin->dtbgn_probefunc(data, begin->dtbgn_arg)); 2695 } 2696 2697 static int 2698 dt_consume_begin_record(const dtrace_probedata_t *data, 2699 const dtrace_recdesc_t *rec, void *arg) 2700 { 2701 dt_begin_t *begin = arg; 2702 2703 return (begin->dtbgn_recfunc(data, rec, begin->dtbgn_arg)); 2704 } 2705 2706 static int 2707 dt_consume_begin_error(const dtrace_errdata_t *data, void *arg) 2708 { 2709 dt_begin_t *begin = (dt_begin_t *)arg; 2710 dtrace_probedesc_t *pd = data->dteda_pdesc; 2711 2712 int r1 = (strcmp(pd->dtpd_provider, "dtrace") == 0); 2713 int r2 = (strcmp(pd->dtpd_name, "BEGIN") == 0); 2714 2715 if (begin->dtbgn_beginonly) { 2716 if (!(r1 && r2)) 2717 return (DTRACE_HANDLE_OK); 2718 } else { 2719 if (r1 && r2) 2720 return (DTRACE_HANDLE_OK); 2721 } 2722 2723 return (begin->dtbgn_errhdlr(data, begin->dtbgn_errarg)); 2724 } 2725 2726 static int 2727 dt_consume_begin(dtrace_hdl_t *dtp, FILE *fp, 2728 dtrace_consume_probe_f *pf, dtrace_consume_rec_f *rf, void *arg) 2729 { 2730 /* 2731 * There's this idea that the BEGIN probe should be processed before 2732 * everything else, and that the END probe should be processed after 2733 * anything else. In the common case, this is pretty easy to deal 2734 * with. However, a situation may arise where the BEGIN enabling and 2735 * END enabling are on the same CPU, and some enabling in the middle 2736 * occurred on a different CPU. To deal with this (blech!) we need to 2737 * consume the BEGIN buffer up until the end of the BEGIN probe, and 2738 * then set it aside. We will then process every other CPU, and then 2739 * we'll return to the BEGIN CPU and process the rest of the data 2740 * (which will inevitably include the END probe, if any). Making this 2741 * even more complicated (!) is the library's ERROR enabling. Because 2742 * this enabling is processed before we even get into the consume call 2743 * back, any ERROR firing would result in the library's ERROR enabling 2744 * being processed twice -- once in our first pass (for BEGIN probes), 2745 * and again in our second pass (for everything but BEGIN probes). 
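 * (Concretely: if a clause on the BEGIN CPU faults and fires the ERROR
 * probe, that ERROR record sits in the very buffer we are about to
 * consume twice, and its handler would otherwise run once in each
 * pass.)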
To 2746 * deal with this, we interpose on the ERROR handler to assure that we 2747 * only process ERROR enablings induced by BEGIN enablings in the 2748 * first pass, and that we only process ERROR enablings _not_ induced 2749 * by BEGIN enablings in the second pass. 2750 */ 2751 2752 dt_begin_t begin; 2753 processorid_t cpu = dtp->dt_beganon; 2754 int rval, i; 2755 static int max_ncpus; 2756 dtrace_bufdesc_t *buf; 2757 2758 dtp->dt_beganon = -1; 2759 2760 if (dt_get_buf(dtp, cpu, &buf) != 0) 2761 return (-1); 2762 if (buf == NULL) 2763 return (0); 2764 2765 if (!dtp->dt_stopped || buf->dtbd_cpu != dtp->dt_endedon) { 2766 /* 2767 * This is the simple case. We're either not stopped, or if 2768 * we are, we actually processed any END probes on another 2769 * CPU. We can simply consume this buffer and return. 2770 */ 2771 rval = dt_consume_cpu(dtp, fp, cpu, buf, B_FALSE, 2772 pf, rf, arg); 2773 dt_put_buf(dtp, buf); 2774 return (rval); 2775 } 2776 2777 begin.dtbgn_probefunc = pf; 2778 begin.dtbgn_recfunc = rf; 2779 begin.dtbgn_arg = arg; 2780 begin.dtbgn_beginonly = 1; 2781 2782 /* 2783 * We need to interpose on the ERROR handler to be sure that we 2784 * only process ERRORs induced by BEGIN. 2785 */ 2786 begin.dtbgn_errhdlr = dtp->dt_errhdlr; 2787 begin.dtbgn_errarg = dtp->dt_errarg; 2788 dtp->dt_errhdlr = dt_consume_begin_error; 2789 dtp->dt_errarg = &begin; 2790 2791 rval = dt_consume_cpu(dtp, fp, cpu, buf, B_FALSE, 2792 dt_consume_begin_probe, dt_consume_begin_record, &begin); 2793 2794 dtp->dt_errhdlr = begin.dtbgn_errhdlr; 2795 dtp->dt_errarg = begin.dtbgn_errarg; 2796 2797 if (rval != 0) { 2798 dt_put_buf(dtp, buf); 2799 return (rval); 2800 } 2801 2802 if (max_ncpus == 0) 2803 max_ncpus = dt_sysconf(dtp, _SC_CPUID_MAX) + 1; 2804 2805 for (i = 0; i < max_ncpus; i++) { 2806 dtrace_bufdesc_t *nbuf; 2807 if (i == cpu) 2808 continue; 2809 2810 if (dt_get_buf(dtp, i, &nbuf) != 0) { 2811 dt_put_buf(dtp, buf); 2812 return (-1); 2813 } 2814 if (nbuf == NULL) 2815 continue; 2816 2817 rval = dt_consume_cpu(dtp, fp, i, nbuf, B_FALSE, 2818 pf, rf, arg); 2819 dt_put_buf(dtp, nbuf); 2820 if (rval != 0) { 2821 dt_put_buf(dtp, buf); 2822 return (rval); 2823 } 2824 } 2825 2826 /* 2827 * Okay -- we're done with the other buffers. Now we want to 2828 * reconsume the first buffer -- but this time we're looking for 2829 * everything _but_ BEGIN. And of course, in order to only consume 2830 * those ERRORs _not_ associated with BEGIN, we need to reinstall our 2831 * ERROR interposition function... 2832 */ 2833 begin.dtbgn_beginonly = 0; 2834 2835 assert(begin.dtbgn_errhdlr == dtp->dt_errhdlr); 2836 assert(begin.dtbgn_errarg == dtp->dt_errarg); 2837 dtp->dt_errhdlr = dt_consume_begin_error; 2838 dtp->dt_errarg = &begin; 2839 2840 rval = dt_consume_cpu(dtp, fp, cpu, buf, B_FALSE, 2841 dt_consume_begin_probe, dt_consume_begin_record, &begin); 2842 2843 dtp->dt_errhdlr = begin.dtbgn_errhdlr; 2844 dtp->dt_errarg = begin.dtbgn_errarg; 2845 2846 return (rval); 2847 } 2848 2849 /* ARGSUSED */ 2850 static uint64_t 2851 dt_buf_oldest(void *elem, void *arg) 2852 { 2853 dtrace_bufdesc_t *buf = elem; 2854 size_t offs = buf->dtbd_oldest; 2855 2856 while (offs < buf->dtbd_size) { 2857 dtrace_rechdr_t *dtrh = 2858 /* LINTED - alignment */ 2859 (dtrace_rechdr_t *)(buf->dtbd_data + offs); 2860 if (dtrh->dtrh_epid == DTRACE_EPIDNONE) { 2861 offs += sizeof (dtrace_epid_t); 2862 } else { 2863 return (DTRACE_RECORD_LOAD_TIMESTAMP(dtrh)); 2864 } 2865 } 2866 2867 /* There are no records left; use the time the buffer was retrieved. 
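 * Returning dtbd_timestamp here doubles as a sentinel for the caller:
 * dtrace_consume() compares the value we return against the buffer's
 * dtbd_timestamp, and when the two are equal it knows this buffer has
 * no records left within the time range covered by the current pass.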
*/ 2868 return (buf->dtbd_timestamp); 2869 } 2870 2871 int 2872 dtrace_consume(dtrace_hdl_t *dtp, FILE *fp, 2873 dtrace_consume_probe_f *pf, dtrace_consume_rec_f *rf, void *arg) 2874 { 2875 dtrace_optval_t size; 2876 static int max_ncpus; 2877 int i, rval; 2878 dtrace_optval_t interval = dtp->dt_options[DTRACEOPT_SWITCHRATE]; 2879 hrtime_t now = gethrtime(); 2880 2881 if (dtp->dt_lastswitch != 0) { 2882 if (now - dtp->dt_lastswitch < interval) 2883 return (0); 2884 2885 dtp->dt_lastswitch += interval; 2886 } else { 2887 dtp->dt_lastswitch = now; 2888 } 2889 2890 if (!dtp->dt_active) 2891 return (dt_set_errno(dtp, EINVAL)); 2892 2893 if (max_ncpus == 0) 2894 max_ncpus = dt_sysconf(dtp, _SC_CPUID_MAX) + 1; 2895 2896 if (pf == NULL) 2897 pf = (dtrace_consume_probe_f *)dt_nullprobe; 2898 2899 if (rf == NULL) 2900 rf = (dtrace_consume_rec_f *)dt_nullrec; 2901 2902 if (dtp->dt_options[DTRACEOPT_TEMPORAL] == DTRACEOPT_UNSET) { 2903 /* 2904 * The output will not be in the order it was traced. Rather, 2905 * we will consume all of the data from each CPU's buffer in 2906 * turn. We apply special handling for the records from BEGIN 2907 * and END probes so that they are consumed first and last, 2908 * respectively. 2909 * 2910 * If we have just begun, we want to first process the CPU that 2911 * executed the BEGIN probe (if any). 2912 */ 2913 if (dtp->dt_active && dtp->dt_beganon != -1 && 2914 (rval = dt_consume_begin(dtp, fp, pf, rf, arg)) != 0) 2915 return (rval); 2916 2917 for (i = 0; i < max_ncpus; i++) { 2918 dtrace_bufdesc_t *buf; 2919 2920 /* 2921 * If we have stopped, we want to process the CPU on 2922 * which the END probe was processed only _after_ we 2923 * have processed everything else. 2924 */ 2925 if (dtp->dt_stopped && (i == dtp->dt_endedon)) 2926 continue; 2927 2928 if (dt_get_buf(dtp, i, &buf) != 0) 2929 return (-1); 2930 if (buf == NULL) 2931 continue; 2932 2933 dtp->dt_flow = 0; 2934 dtp->dt_indent = 0; 2935 dtp->dt_prefix = NULL; 2936 rval = dt_consume_cpu(dtp, fp, i, 2937 buf, B_FALSE, pf, rf, arg); 2938 dt_put_buf(dtp, buf); 2939 if (rval != 0) 2940 return (rval); 2941 } 2942 if (dtp->dt_stopped) { 2943 dtrace_bufdesc_t *buf; 2944 2945 if (dt_get_buf(dtp, dtp->dt_endedon, &buf) != 0) 2946 return (-1); 2947 if (buf == NULL) 2948 return (0); 2949 2950 rval = dt_consume_cpu(dtp, fp, dtp->dt_endedon, 2951 buf, B_FALSE, pf, rf, arg); 2952 dt_put_buf(dtp, buf); 2953 return (rval); 2954 } 2955 } else { 2956 /* 2957 * The output will be in the order it was traced (or for 2958 * speculations, when it was committed). We retrieve a buffer 2959 * from each CPU and put it into a priority queue, which sorts 2960 * based on the first entry in the buffer. This is sufficient 2961 * because entries within a buffer are already sorted. 2962 * 2963 * We then consume records one at a time, always consuming the 2964 * oldest record, as determined by the priority queue. When 2965 * we reach the end of the time covered by these buffers, 2966 * we need to stop and retrieve more records on the next pass. 2967 * The kernel tells us the time covered by each buffer, in 2968 * dtbd_timestamp. The first buffer's timestamp tells us the 2969 * time covered by all buffers, as subsequently retrieved 2970 * buffers will cover to a more recent time. 
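 *
 * A small illustration with hypothetical timestamps: suppose CPU 0's
 * buffer holds records at t=3 and t=9 and was snapped at t=10, while
 * CPU 1's buffer holds records at t=5 and t=11 and was snapped at
 * t=12.  The priority queue yields the records in the order 3, 5, 9;
 * CPU 0's buffer is then exhausted, and because its snapshot time (10)
 * is the earliest of the pass, we stop there.  The t=11 record waits
 * for the next pass, since CPU 0 could still produce a record between
 * t=10 and t=11.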
2971 */ 2972 2973 uint64_t *drops = alloca(max_ncpus * sizeof (uint64_t)); 2974 uint64_t first_timestamp = 0; 2975 uint_t cookie = 0; 2976 dtrace_bufdesc_t *buf; 2977 2978 bzero(drops, max_ncpus * sizeof (uint64_t)); 2979 2980 if (dtp->dt_bufq == NULL) { 2981 dtp->dt_bufq = dt_pq_init(dtp, max_ncpus * 2, 2982 dt_buf_oldest, NULL); 2983 if (dtp->dt_bufq == NULL) /* ENOMEM */ 2984 return (-1); 2985 } 2986 2987 /* Retrieve data from each CPU. */ 2988 (void) dtrace_getopt(dtp, "bufsize", &size); 2989 for (i = 0; i < max_ncpus; i++) { 2990 dtrace_bufdesc_t *buf; 2991 2992 if (dt_get_buf(dtp, i, &buf) != 0) 2993 return (-1); 2994 if (buf != NULL) { 2995 if (first_timestamp == 0) 2996 first_timestamp = buf->dtbd_timestamp; 2997 assert(buf->dtbd_timestamp >= first_timestamp); 2998 2999 dt_pq_insert(dtp->dt_bufq, buf); 3000 drops[i] = buf->dtbd_drops; 3001 buf->dtbd_drops = 0; 3002 } 3003 } 3004 3005 /* Consume records. */ 3006 for (;;) { 3007 dtrace_bufdesc_t *buf = dt_pq_pop(dtp->dt_bufq); 3008 uint64_t timestamp; 3009 3010 if (buf == NULL) 3011 break; 3012 3013 timestamp = dt_buf_oldest(buf, dtp); 3014 assert(timestamp >= dtp->dt_last_timestamp); 3015 dtp->dt_last_timestamp = timestamp; 3016 3017 if (timestamp == buf->dtbd_timestamp) { 3018 /* 3019 * We've reached the end of the time covered 3020 * by this buffer. If this is the oldest 3021 * buffer, we must do another pass 3022 * to retrieve more data. 3023 */ 3024 dt_put_buf(dtp, buf); 3025 if (timestamp == first_timestamp && 3026 !dtp->dt_stopped) 3027 break; 3028 continue; 3029 } 3030 3031 if ((rval = dt_consume_cpu(dtp, fp, 3032 buf->dtbd_cpu, buf, B_TRUE, pf, rf, arg)) != 0) 3033 return (rval); 3034 dt_pq_insert(dtp->dt_bufq, buf); 3035 } 3036 3037 /* Consume drops. */ 3038 for (i = 0; i < max_ncpus; i++) { 3039 if (drops[i] != 0) { 3040 int error = dt_handle_cpudrop(dtp, i, 3041 DTRACEDROP_PRINCIPAL, drops[i]); 3042 if (error != 0) 3043 return (error); 3044 } 3045 } 3046 3047 /* 3048 * Reduce memory usage by re-allocating smaller buffers 3049 * for the "remnants". 3050 */ 3051 while (buf = dt_pq_walk(dtp->dt_bufq, &cookie)) 3052 dt_realloc_buf(dtp, buf, buf->dtbd_size); 3053 } 3054 3055 return (0); 3056 } 3057
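/*
 * Illustrative sketch only (not part of libdtrace): the minimal shape of
 * a consumer that drives dtrace_consume() with the probe and record
 * callbacks used above.  The handle setup -- dtrace_open(), program
 * compilation and dtrace_go() -- is assumed to have happened elsewhere,
 * and the callback names "chew" and "chewrec" are hypothetical.
 *
 *	static int
 *	chew(const dtrace_probedata_t *data, void *arg)
 *	{
 *		return (DTRACE_CONSUME_THIS);
 *	}
 *
 *	static int
 *	chewrec(const dtrace_probedata_t *data, const dtrace_recdesc_t *rec,
 *	    void *arg)
 *	{
 *		if (rec == NULL)
 *			return (DTRACE_CONSUME_NEXT);
 *		return (DTRACE_CONSUME_THIS);
 *	}
 *
 *	...
 *	if (dtrace_consume(dtp, stdout, chew, chewrec, NULL) != 0)
 *		(void) fprintf(stderr, "%s\n",
 *		    dtrace_errmsg(dtp, dtrace_errno(dtp)));
 */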