/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright (c) 2013, Joyent, Inc. All rights reserved.
 * Copyright (c) 2012 by Delphix. All rights reserved.
 */

#include <stdlib.h>
#include <strings.h>
#include <errno.h>
#include <unistd.h>
#include <limits.h>
#include <assert.h>
#include <ctype.h>
#ifdef illumos
#include <alloca.h>
#endif
#include <dt_impl.h>
#include <dt_pq.h>
#ifndef illumos
#include <libproc_compat.h>
#endif

#define	DT_MASK_LO	0x00000000FFFFFFFFULL

/*
 * We declare this here because (1) we need it and (2) we want to avoid a
 * dependency on libm in libdtrace.
 */
static long double
dt_fabsl(long double x)
{
	if (x < 0)
		return (-x);

	return (x);
}

static int
dt_ndigits(long long val)
{
	int rval = 1;
	long long cmp = 10;

	if (val < 0) {
		val = val == INT64_MIN ? INT64_MAX : -val;
		rval++;
	}

	while (val > cmp && cmp > 0) {
		rval++;
		cmp *= 10;
	}

	return (rval < 4 ? 4 : rval);
}

/*
 * 128-bit arithmetic functions needed to support the stddev() aggregating
 * action.
 */
static int
dt_gt_128(uint64_t *a, uint64_t *b)
{
	return (a[1] > b[1] || (a[1] == b[1] && a[0] > b[0]));
}

static int
dt_ge_128(uint64_t *a, uint64_t *b)
{
	return (a[1] > b[1] || (a[1] == b[1] && a[0] >= b[0]));
}

static int
dt_le_128(uint64_t *a, uint64_t *b)
{
	return (a[1] < b[1] || (a[1] == b[1] && a[0] <= b[0]));
}

/*
 * Shift the 128-bit value in a by b. If b is positive, shift left.
 * If b is negative, shift right.
 */
static void
dt_shift_128(uint64_t *a, int b)
{
	uint64_t mask;

	if (b == 0)
		return;

	if (b < 0) {
		b = -b;
		if (b >= 64) {
			a[0] = a[1] >> (b - 64);
			a[1] = 0;
		} else {
			a[0] >>= b;
			mask = 1LL << (64 - b);
			mask -= 1;
			a[0] |= ((a[1] & mask) << (64 - b));
			a[1] >>= b;
		}
	} else {
		if (b >= 64) {
			a[1] = a[0] << (b - 64);
			a[0] = 0;
		} else {
			a[1] <<= b;
			mask = a[0] >> (64 - b);
			a[1] |= mask;
			a[0] <<= b;
		}
	}
}

static int
dt_nbits_128(uint64_t *a)
{
	int nbits = 0;
	uint64_t tmp[2];
	uint64_t zero[2] = { 0, 0 };

	tmp[0] = a[0];
	tmp[1] = a[1];

	dt_shift_128(tmp, -1);
	while (dt_gt_128(tmp, zero)) {
		dt_shift_128(tmp, -1);
		nbits++;
	}

	return (nbits);
}

static void
dt_subtract_128(uint64_t *minuend, uint64_t *subtrahend, uint64_t *difference)
{
	uint64_t result[2];

	result[0] = minuend[0] - subtrahend[0];
	result[1] = minuend[1] - subtrahend[1] -
	    (minuend[0] < subtrahend[0] ? 1 : 0);

	difference[0] = result[0];
	difference[1] = result[1];
}

static void
dt_add_128(uint64_t *addend1, uint64_t *addend2, uint64_t *sum)
{
	uint64_t result[2];

	result[0] = addend1[0] + addend2[0];
	result[1] = addend1[1] + addend2[1] +
	    (result[0] < addend1[0] || result[0] < addend2[0] ? 1 : 0);

	sum[0] = result[0];
	sum[1] = result[1];
}

/*
 * The basic idea is to break the 2 64-bit values into 4 32-bit values,
 * use native multiplication on those, and then re-combine into the
 * resulting 128-bit value.
 *
 * (hi1 << 32 + lo1) * (hi2 << 32 + lo2) =
 *     hi1 * hi2 << 64 +
 *     hi1 * lo2 << 32 +
 *     hi2 * lo1 << 32 +
 *     lo1 * lo2
 */
static void
dt_multiply_128(uint64_t factor1, uint64_t factor2, uint64_t *product)
{
	uint64_t hi1, hi2, lo1, lo2;
	uint64_t tmp[2];

	hi1 = factor1 >> 32;
	hi2 = factor2 >> 32;

	lo1 = factor1 & DT_MASK_LO;
	lo2 = factor2 & DT_MASK_LO;

	product[0] = lo1 * lo2;
	product[1] = hi1 * hi2;

	tmp[0] = hi1 * lo2;
	tmp[1] = 0;
	dt_shift_128(tmp, 32);
	dt_add_128(product, tmp, product);

	tmp[0] = hi2 * lo1;
	tmp[1] = 0;
	dt_shift_128(tmp, 32);
	dt_add_128(product, tmp, product);
}

/*
 * This is long-hand division.
 *
 * We initialize subtrahend by shifting divisor left as far as possible. We
 * loop, comparing subtrahend to dividend: if subtrahend is smaller, we
 * subtract and set the appropriate bit in the result. We then shift
 * subtrahend right by one bit for the next comparison.
 */
static void
dt_divide_128(uint64_t *dividend, uint64_t divisor, uint64_t *quotient)
{
	uint64_t result[2] = { 0, 0 };
	uint64_t remainder[2];
	uint64_t subtrahend[2];
	uint64_t divisor_128[2];
	uint64_t mask[2] = { 1, 0 };
	int log = 0;

	assert(divisor != 0);

	divisor_128[0] = divisor;
	divisor_128[1] = 0;

	remainder[0] = dividend[0];
	remainder[1] = dividend[1];

	subtrahend[0] = divisor;
	subtrahend[1] = 0;

	while (divisor > 0) {
		log++;
		divisor >>= 1;
	}

	dt_shift_128(subtrahend, 128 - log);
	dt_shift_128(mask, 128 - log);

	while (dt_ge_128(remainder, divisor_128)) {
		if (dt_ge_128(remainder, subtrahend)) {
			dt_subtract_128(remainder, subtrahend, remainder);
			result[0] |= mask[0];
			result[1] |= mask[1];
		}

		dt_shift_128(subtrahend, -1);
		dt_shift_128(mask, -1);
	}

	quotient[0] = result[0];
	quotient[1] = result[1];
}
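
/*
 * For example, dividing the 128-bit value 2^64 + 2 (dividend[0] == 2,
 * dividend[1] == 1) by 3 shifts the subtrahend left by 126 bits (128 minus
 * the two bits in the divisor); the compare/subtract/shift loop then sets
 * the appropriate quotient bits, yielding 0x5555555555555556 in quotient[0]
 * and 0 in quotient[1].
 */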

/*
 * This is the long-hand method of calculating a square root.
 * The algorithm is as follows:
 *
 * 1. Group the digits by 2 from the right.
 * 2. Over the leftmost group, find the largest single-digit number
 *    whose square is less than that group.
 * 3. Subtract the result of the previous step (2 or 4, depending) and
 *    bring down the next two-digit group.
 * 4. For the result R we have so far, find the largest single-digit number
 *    x such that 2 * R * 10 * x + x^2 is less than the result from step 3.
 *    (Note that this is doubling R and performing a decimal left-shift by 1
 *    and searching for the appropriate decimal to fill the one's place.)
 *    The value x is the next digit in the square root.
 * Repeat steps 3 and 4 until the desired precision is reached.  (We're
 * dealing with integers, so the above is sufficient.)
 *
 * In decimal, the square root of 582,734 would be calculated as so:
 *
 *     __7__6__3
 *    | 58 27 34
 *     -49          (7^2 == 49 => 7 is the first digit in the square root)
 *      --
 *       9 27       (Subtract and bring down the next group.)
 * 146   8 76       (2 * 7 * 10 * 6 + 6^2 == 876 => 6 is the next digit in
 *      -----        the square root)
 *         51 34    (Subtract and bring down the next group.)
 * 1523    45 69    (2 * 76 * 10 * 3 + 3^2 == 4569 => 3 is the next digit in
 *        -----      the square root)
 *          5 65    (remainder)
 *
 * The above algorithm applies similarly in binary, but note that the
 * only possible non-zero value for x in step 4 is 1, so step 4 becomes a
 * simple decision: is 2 * R * 2 * 1 + 1^2 (aka R << 2 + 1) less than the
 * preceding difference?
 *
 * In binary, the square root of 11011011 would be calculated as so:
 *
 *     __1__1__1__0
 *    | 11 01 10 11
 *      01             (0 << 2 + 1 == 1 < 11 => this bit is 1)
 *      --
 *      10 01 10 11
 * 101   1 01          (1 << 2 + 1 == 101 < 1001 => next bit is 1)
 *      -----
 *       1 00 10 11
 * 1101    11 01       (11 << 2 + 1 == 1101 < 10010 => next bit is 1)
 *       -------
 *          1 01 11
 * 11101      1 11 01  (111 << 2 + 1 == 11101 > 10111 => last bit is 0)
 *
 */
static uint64_t
dt_sqrt_128(uint64_t *square)
{
	uint64_t result[2] = { 0, 0 };
	uint64_t diff[2] = { 0, 0 };
	uint64_t one[2] = { 1, 0 };
	uint64_t next_pair[2];
	uint64_t next_try[2];
	uint64_t bit_pairs, pair_shift;
	int i;

	bit_pairs = dt_nbits_128(square) / 2;
	pair_shift = bit_pairs * 2;

	for (i = 0; i <= bit_pairs; i++) {
		/*
		 * Bring down the next pair of bits.
		 */
		next_pair[0] = square[0];
		next_pair[1] = square[1];
		dt_shift_128(next_pair, -pair_shift);
		next_pair[0] &= 0x3;
		next_pair[1] = 0;

		dt_shift_128(diff, 2);
		dt_add_128(diff, next_pair, diff);

		/*
		 * next_try = R << 2 + 1
		 */
		next_try[0] = result[0];
		next_try[1] = result[1];
		dt_shift_128(next_try, 2);
		dt_add_128(next_try, one, next_try);

		if (dt_le_128(next_try, diff)) {
			dt_subtract_128(diff, next_try, diff);
			dt_shift_128(result, 1);
			dt_add_128(result, one, result);
		} else {
			dt_shift_128(result, 1);
		}

		pair_shift -= 2;
	}

	assert(result[1] == 0);

	return (result[0]);
}

uint64_t
dt_stddev(uint64_t *data, uint64_t normal)
{
	uint64_t avg_of_squares[2];
	uint64_t square_of_avg[2];
	int64_t norm_avg;
	uint64_t diff[2];

	if (data[0] == 0)
		return (0);

	/*
	 * The standard approximation for standard deviation is
	 * sqrt(average(x**2) - average(x)**2), i.e. the square root
	 * of the average of the squares minus the square of the average.
	 */
	dt_divide_128(data + 2, normal, avg_of_squares);
	dt_divide_128(avg_of_squares, data[0], avg_of_squares);

	norm_avg = (int64_t)data[1] / (int64_t)normal / (int64_t)data[0];

	if (norm_avg < 0)
		norm_avg = -norm_avg;

	dt_multiply_128((uint64_t)norm_avg, (uint64_t)norm_avg, square_of_avg);

	dt_subtract_128(avg_of_squares, square_of_avg, diff);

	return (dt_sqrt_128(diff));
}
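
/*
 * As an illustration of the approximation above: for the eight values
 * 2, 4, 4, 4, 5, 5, 7 and 9 traced with normal == 1, data[0] (the count)
 * is 8, data[1] (the sum) is 40, and data[2..3] (the 128-bit sum of the
 * squares) holds 232.  The average of the squares is 29, the square of the
 * average is 25, and the square root of their difference yields a standard
 * deviation of 2.
 */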

static int
dt_flowindent(dtrace_hdl_t *dtp, dtrace_probedata_t *data, dtrace_epid_t last,
    dtrace_bufdesc_t *buf, size_t offs)
{
	dtrace_probedesc_t *pd = data->dtpda_pdesc, *npd;
	dtrace_eprobedesc_t *epd = data->dtpda_edesc, *nepd;
	char *p = pd->dtpd_provider, *n = pd->dtpd_name, *sub;
	dtrace_flowkind_t flow = DTRACEFLOW_NONE;
	const char *str = NULL;
	static const char *e_str[2] = { " -> ", " => " };
	static const char *r_str[2] = { " <- ", " <= " };
	static const char *ent = "entry", *ret = "return";
	static int entlen = 0, retlen = 0;
	dtrace_epid_t next, id = epd->dtepd_epid;
	int rval;

	if (entlen == 0) {
		assert(retlen == 0);
		entlen = strlen(ent);
		retlen = strlen(ret);
	}

	/*
	 * If the name of the probe is "entry" or ends with "-entry", we
	 * treat it as an entry; if it is "return" or ends with "-return",
	 * we treat it as a return.  (This allows application-provided probes
	 * like "method-entry" or "function-entry" to participate in flow
	 * indentation -- without accidentally misinterpreting popular probe
	 * names like "carpentry", "gentry" or "Coventry".)
	 */
	if ((sub = strstr(n, ent)) != NULL && sub[entlen] == '\0' &&
	    (sub == n || sub[-1] == '-')) {
		flow = DTRACEFLOW_ENTRY;
		str = e_str[strcmp(p, "syscall") == 0];
	} else if ((sub = strstr(n, ret)) != NULL && sub[retlen] == '\0' &&
	    (sub == n || sub[-1] == '-')) {
		flow = DTRACEFLOW_RETURN;
		str = r_str[strcmp(p, "syscall") == 0];
	}

	/*
	 * If we're going to indent this, we need to check the ID of our last
	 * call.  If we're looking at the same probe ID but a different EPID,
	 * we _don't_ want to indent.  (Yes, there are some minor holes in
	 * this scheme -- it's a heuristic.)
	 */
	if (flow == DTRACEFLOW_ENTRY) {
		if ((last != DTRACE_EPIDNONE && id != last &&
		    pd->dtpd_id == dtp->dt_pdesc[last]->dtpd_id))
			flow = DTRACEFLOW_NONE;
	}

	/*
	 * If we're going to unindent this, it's more difficult to see if
	 * we don't actually want to unindent it -- we need to look at the
	 * _next_ EPID.
	 */
	if (flow == DTRACEFLOW_RETURN) {
		offs += epd->dtepd_size;

		do {
			if (offs >= buf->dtbd_size)
				goto out;

			next = *(uint32_t *)((uintptr_t)buf->dtbd_data + offs);

			if (next == DTRACE_EPIDNONE)
				offs += sizeof (id);
		} while (next == DTRACE_EPIDNONE);

		if ((rval = dt_epid_lookup(dtp, next, &nepd, &npd)) != 0)
			return (rval);

		if (next != id && npd->dtpd_id == pd->dtpd_id)
			flow = DTRACEFLOW_NONE;
	}

out:
	if (flow == DTRACEFLOW_ENTRY || flow == DTRACEFLOW_RETURN) {
		data->dtpda_prefix = str;
	} else {
		data->dtpda_prefix = "| ";
	}

	if (flow == DTRACEFLOW_RETURN && data->dtpda_indent > 0)
		data->dtpda_indent -= 2;

	data->dtpda_flow = flow;

	return (0);
}

static int
dt_nullprobe()
{
	return (DTRACE_CONSUME_THIS);
}

static int
dt_nullrec()
{
	return (DTRACE_CONSUME_NEXT);
}

static void
dt_quantize_total(dtrace_hdl_t *dtp, int64_t datum, long double *total)
{
	long double val = dt_fabsl((long double)datum);

	if (dtp->dt_options[DTRACEOPT_AGGZOOM] == DTRACEOPT_UNSET) {
		*total += val;
		return;
	}

	/*
	 * If we're zooming in on an aggregation, we want the height of the
	 * highest value to be approximately 95% of total bar height -- so we
	 * adjust up by the reciprocal of DTRACE_AGGZOOM_MAX when comparing to
	 * our highest value.
	 */
	val *= 1 / DTRACE_AGGZOOM_MAX;

	if (*total < val)
		*total = val;
}

static int
dt_print_quanthdr(dtrace_hdl_t *dtp, FILE *fp, int width)
{
	return (dt_printf(dtp, fp, "\n%*s %41s %-9s\n",
	    width ? width : 16, width ? "key" : "value",
	    "------------- Distribution -------------", "count"));
}

static int
dt_print_quanthdr_packed(dtrace_hdl_t *dtp, FILE *fp, int width,
    const dtrace_aggdata_t *aggdata, dtrace_actkind_t action)
{
	int min = aggdata->dtada_minbin, max = aggdata->dtada_maxbin;
	int minwidth, maxwidth, i;

	assert(action == DTRACEAGG_QUANTIZE || action == DTRACEAGG_LQUANTIZE);

	if (action == DTRACEAGG_QUANTIZE) {
		if (min != 0 && min != DTRACE_QUANTIZE_ZEROBUCKET)
			min--;

		if (max < DTRACE_QUANTIZE_NBUCKETS - 1)
			max++;

		minwidth = dt_ndigits(DTRACE_QUANTIZE_BUCKETVAL(min));
		maxwidth = dt_ndigits(DTRACE_QUANTIZE_BUCKETVAL(max));
	} else {
		maxwidth = 8;
		minwidth = maxwidth - 1;
		max++;
	}

	if (dt_printf(dtp, fp, "\n%*s %*s .",
	    width, width > 0 ? "key" : "", minwidth, "min") < 0)
		return (-1);

	for (i = min; i <= max; i++) {
		if (dt_printf(dtp, fp, "-") < 0)
			return (-1);
	}

	return (dt_printf(dtp, fp, ". %*s | count\n", -maxwidth, "max"));
}

/*
 * We use a subset of the Unicode Block Elements (U+2588 through U+258F,
 * inclusive) to represent aggregations via UTF-8 -- which are expressed via
 * 3-byte UTF-8 sequences.
 */
#define	DTRACE_AGGUTF8_FULL	0x2588
#define	DTRACE_AGGUTF8_BASE	0x258f
#define	DTRACE_AGGUTF8_LEVELS	8

#define	DTRACE_AGGUTF8_BYTE0(val)	(0xe0 | ((val) >> 12))
#define	DTRACE_AGGUTF8_BYTE1(val)	(0x80 | (((val) >> 6) & 0x3f))
#define	DTRACE_AGGUTF8_BYTE2(val)	(0x80 | ((val) & 0x3f))
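
/*
 * For example, DTRACE_AGGUTF8_FULL (U+2588, FULL BLOCK) is emitted as the
 * three-byte UTF-8 sequence 0xe2 0x96 0x88:  BYTE0 contributes 0xe0 | 0x02,
 * BYTE1 contributes 0x80 | 0x16, and BYTE2 contributes 0x80 | 0x08.
 */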

static int
dt_print_quantline_utf8(dtrace_hdl_t *dtp, FILE *fp, int64_t val,
    uint64_t normal, long double total)
{
	uint_t len = 40, i, whole, partial;
	long double f = (dt_fabsl((long double)val) * len) / total;
	const char *spaces = "                                        ";

	whole = (uint_t)f;
	partial = (uint_t)((f - (long double)(uint_t)f) *
	    (long double)DTRACE_AGGUTF8_LEVELS);

	if (dt_printf(dtp, fp, "|") < 0)
		return (-1);

	for (i = 0; i < whole; i++) {
		if (dt_printf(dtp, fp, "%c%c%c",
		    DTRACE_AGGUTF8_BYTE0(DTRACE_AGGUTF8_FULL),
		    DTRACE_AGGUTF8_BYTE1(DTRACE_AGGUTF8_FULL),
		    DTRACE_AGGUTF8_BYTE2(DTRACE_AGGUTF8_FULL)) < 0)
			return (-1);
	}

	if (partial != 0) {
		partial = DTRACE_AGGUTF8_BASE - (partial - 1);

		if (dt_printf(dtp, fp, "%c%c%c",
		    DTRACE_AGGUTF8_BYTE0(partial),
		    DTRACE_AGGUTF8_BYTE1(partial),
		    DTRACE_AGGUTF8_BYTE2(partial)) < 0)
			return (-1);

		i++;
	}

	return (dt_printf(dtp, fp, "%s %-9lld\n", spaces + i,
	    (long long)val / normal));
}

static int
dt_print_quantline(dtrace_hdl_t *dtp, FILE *fp, int64_t val,
    uint64_t normal, long double total, char positives, char negatives)
{
	long double f;
	uint_t depth, len = 40;

	const char *ats = "@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@";
	const char *spaces = "                                        ";

	assert(strlen(ats) == len && strlen(spaces) == len);
	assert(!(total == 0 && (positives || negatives)));
	assert(!(val < 0 && !negatives));
	assert(!(val > 0 && !positives));
	assert(!(val != 0 && total == 0));

	if (!negatives) {
		if (positives) {
			if (dtp->dt_encoding == DT_ENCODING_UTF8) {
				return (dt_print_quantline_utf8(dtp, fp, val,
				    normal, total));
			}

			f = (dt_fabsl((long double)val) * len) / total;
			depth = (uint_t)(f + 0.5);
		} else {
			depth = 0;
		}

		return (dt_printf(dtp, fp, "|%s%s %-9lld\n", ats + len - depth,
		    spaces + depth, (long long)val / normal));
	}

	if (!positives) {
		f = (dt_fabsl((long double)val) * len) / total;
		depth = (uint_t)(f + 0.5);

		return (dt_printf(dtp, fp, "%s%s| %-9lld\n", spaces + depth,
		    ats + len - depth, (long long)val / normal));
	}

	/*
	 * If we're here, we have both positive and negative bucket values.
	 * To express this graphically, we're going to generate both positive
	 * and negative bars separated by a centerline.  These bars are half
	 * the size of normal quantize()/lquantize() bars, so we divide the
	 * length in half before calculating the bar length.
	 */
	len /= 2;
	ats = &ats[len];
	spaces = &spaces[len];

	f = (dt_fabsl((long double)val) * len) / total;
	depth = (uint_t)(f + 0.5);

	if (val <= 0) {
		return (dt_printf(dtp, fp, "%s%s|%*s %-9lld\n", spaces + depth,
		    ats + len - depth, len, "", (long long)val / normal));
	} else {
		return (dt_printf(dtp, fp, "%20s|%s%s %-9lld\n", "",
		    ats + len - depth, spaces + depth,
		    (long long)val / normal));
	}
}

/*
 * As with UTF-8 printing of aggregations, we use a subset of the Unicode
 * Block Elements (U+2581 through U+2588, inclusive) to represent our packed
 * aggregation.
 */
#define	DTRACE_AGGPACK_BASE	0x2581
#define	DTRACE_AGGPACK_LEVELS	8

static int
dt_print_packed(dtrace_hdl_t *dtp, FILE *fp,
    long double datum, long double total)
{
	static boolean_t utf8_checked = B_FALSE;
	static boolean_t utf8;
	char *ascii = "__xxxxXX";
	char *neg = "vvvvVV";
	unsigned int len;
	long double val;

	if (!utf8_checked) {
		char *term;

		/*
		 * We want to determine if we can reasonably emit UTF-8 for our
		 * packed aggregation.  To do this, we will check for terminals
		 * that are known to be too primitive to emit UTF-8, and avoid
		 * emitting it on these.
		 */
		utf8_checked = B_TRUE;

		if (dtp->dt_encoding == DT_ENCODING_ASCII) {
			utf8 = B_FALSE;
		} else if (dtp->dt_encoding == DT_ENCODING_UTF8) {
			utf8 = B_TRUE;
		} else if ((term = getenv("TERM")) != NULL &&
		    (strcmp(term, "sun") == 0 ||
		    strcmp(term, "sun-color") == 0 ||
		    strcmp(term, "dumb") == 0)) {
			utf8 = B_FALSE;
		} else {
			utf8 = B_TRUE;
		}
	}

	if (datum == 0)
		return (dt_printf(dtp, fp, " "));

	if (datum < 0) {
		len = strlen(neg);
		val = dt_fabsl(datum * (len - 1)) / total;
		return (dt_printf(dtp, fp, "%c", neg[(uint_t)(val + 0.5)]));
	}

	if (utf8) {
		int block = DTRACE_AGGPACK_BASE + (unsigned int)(((datum *
		    (DTRACE_AGGPACK_LEVELS - 1)) / total) + 0.5);

		return (dt_printf(dtp, fp, "%c%c%c",
		    DTRACE_AGGUTF8_BYTE0(block),
		    DTRACE_AGGUTF8_BYTE1(block),
		    DTRACE_AGGUTF8_BYTE2(block)));
	}

	len = strlen(ascii);
	val = (datum * (len - 1)) / total;
	return (dt_printf(dtp, fp, "%c", ascii[(uint_t)(val + 0.5)]));
}

int
dt_print_quantize(dtrace_hdl_t *dtp, FILE *fp, const void *addr,
    size_t size, uint64_t normal)
{
	const int64_t *data = addr;
	int i, first_bin = 0, last_bin = DTRACE_QUANTIZE_NBUCKETS - 1;
	long double total = 0;
	char positives = 0, negatives = 0;

	if (size != DTRACE_QUANTIZE_NBUCKETS * sizeof (uint64_t))
		return (dt_set_errno(dtp, EDT_DMISMATCH));

	while (first_bin < DTRACE_QUANTIZE_NBUCKETS - 1 && data[first_bin] == 0)
		first_bin++;

	if (first_bin == DTRACE_QUANTIZE_NBUCKETS - 1) {
		/*
		 * There isn't any data.  This is possible if the aggregation
		 * has been clear()'d or if negative increment values have been
		 * used.  Regardless, we'll print the buckets around 0.
		 */
		first_bin = DTRACE_QUANTIZE_ZEROBUCKET - 1;
		last_bin = DTRACE_QUANTIZE_ZEROBUCKET + 1;
	} else {
		if (first_bin > 0)
			first_bin--;

		while (last_bin > 0 && data[last_bin] == 0)
			last_bin--;

		if (last_bin < DTRACE_QUANTIZE_NBUCKETS - 1)
			last_bin++;
	}

	for (i = first_bin; i <= last_bin; i++) {
		positives |= (data[i] > 0);
		negatives |= (data[i] < 0);
		dt_quantize_total(dtp, data[i], &total);
	}

	if (dt_print_quanthdr(dtp, fp, 0) < 0)
		return (-1);

	for (i = first_bin; i <= last_bin; i++) {
		if (dt_printf(dtp, fp, "%16lld ",
		    (long long)DTRACE_QUANTIZE_BUCKETVAL(i)) < 0)
			return (-1);

		if (dt_print_quantline(dtp, fp, data[i], normal, total,
		    positives, negatives) < 0)
			return (-1);
	}

	return (0);
}

int
dt_print_quantize_packed(dtrace_hdl_t *dtp, FILE *fp, const void *addr,
    size_t size, const dtrace_aggdata_t *aggdata)
{
	const int64_t *data = addr;
	long double total = 0, count = 0;
	int min = aggdata->dtada_minbin, max = aggdata->dtada_maxbin, i;
	int64_t minval, maxval;

	if (size != DTRACE_QUANTIZE_NBUCKETS * sizeof (uint64_t))
		return (dt_set_errno(dtp, EDT_DMISMATCH));

	if (min != 0 && min != DTRACE_QUANTIZE_ZEROBUCKET)
		min--;

	if (max < DTRACE_QUANTIZE_NBUCKETS - 1)
		max++;

	minval = DTRACE_QUANTIZE_BUCKETVAL(min);
	maxval = DTRACE_QUANTIZE_BUCKETVAL(max);

	if (dt_printf(dtp, fp, " %*lld :", dt_ndigits(minval),
	    (long long)minval) < 0)
		return (-1);

	for (i = min; i <= max; i++) {
		dt_quantize_total(dtp, data[i], &total);
		count += data[i];
	}

	for (i = min; i <= max; i++) {
		if (dt_print_packed(dtp, fp, data[i], total) < 0)
			return (-1);
	}

	if (dt_printf(dtp, fp, ": %*lld | %lld\n",
	    -dt_ndigits(maxval), (long long)maxval, (long long)count) < 0)
		return (-1);

	return (0);
}

int
dt_print_lquantize(dtrace_hdl_t *dtp, FILE *fp, const void *addr,
    size_t size, uint64_t normal)
{
	const int64_t *data = addr;
	int i, first_bin, last_bin, base;
	uint64_t arg;
	long double total = 0;
	uint16_t step, levels;
	char positives = 0, negatives = 0;

	if (size < sizeof (uint64_t))
		return (dt_set_errno(dtp, EDT_DMISMATCH));

	arg = *data++;
	size -= sizeof (uint64_t);

	base = DTRACE_LQUANTIZE_BASE(arg);
	step = DTRACE_LQUANTIZE_STEP(arg);
	levels = DTRACE_LQUANTIZE_LEVELS(arg);

	first_bin = 0;
	last_bin = levels + 1;

	if (size != sizeof (uint64_t) * (levels + 2))
		return (dt_set_errno(dtp, EDT_DMISMATCH));

	while (first_bin <= levels + 1 && data[first_bin] == 0)
		first_bin++;

	if (first_bin > levels + 1) {
		first_bin = 0;
		last_bin = 2;
	} else {
		if (first_bin > 0)
			first_bin--;

		while (last_bin > 0 && data[last_bin] == 0)
			last_bin--;

		if (last_bin < levels + 1)
			last_bin++;
	}

	for (i = first_bin; i <= last_bin; i++) {
		positives |= (data[i] > 0);
		negatives |= (data[i] < 0);
		dt_quantize_total(dtp, data[i], &total);
	}

	if (dt_printf(dtp, fp, "\n%16s %41s %-9s\n", "value",
	    "------------- Distribution -------------", "count") < 0)
		return (-1);

	for (i = first_bin; i <= last_bin; i++) {
		char c[32];
		int err;

		if (i == 0) {
			(void) snprintf(c, sizeof (c), "< %d", base);
			err = dt_printf(dtp, fp, "%16s ", c);
		} else if (i == levels + 1) {
			(void) snprintf(c, sizeof (c), ">= %d",
			    base + (levels * step));
			err = dt_printf(dtp, fp, "%16s ", c);
		} else {
			err = dt_printf(dtp, fp, "%16d ",
			    base + (i - 1) * step);
		}

		if (err < 0 || dt_print_quantline(dtp, fp, data[i], normal,
		    total, positives, negatives) < 0)
			return (-1);
	}

	return (0);
}

/*ARGSUSED*/
int
dt_print_lquantize_packed(dtrace_hdl_t *dtp, FILE *fp, const void *addr,
    size_t size, const dtrace_aggdata_t *aggdata)
{
	const int64_t *data = addr;
	long double total = 0, count = 0;
	int min, max, base, err;
	uint64_t arg;
	uint16_t step, levels;
	char c[32];
	unsigned int i;

	if (size < sizeof (uint64_t))
		return (dt_set_errno(dtp, EDT_DMISMATCH));

	arg = *data++;
	size -= sizeof (uint64_t);

	base = DTRACE_LQUANTIZE_BASE(arg);
	step = DTRACE_LQUANTIZE_STEP(arg);
	levels = DTRACE_LQUANTIZE_LEVELS(arg);

	if (size != sizeof (uint64_t) * (levels + 2))
		return (dt_set_errno(dtp, EDT_DMISMATCH));

	min = 0;
	max = levels + 1;

	if (min == 0) {
		(void) snprintf(c, sizeof (c), "< %d", base);
		err = dt_printf(dtp, fp, "%8s :", c);
	} else {
		err = dt_printf(dtp, fp, "%8d :", base + (min - 1) * step);
	}

	if (err < 0)
		return (-1);

	for (i = min; i <= max; i++) {
		dt_quantize_total(dtp, data[i], &total);
		count += data[i];
	}

	for (i = min; i <= max; i++) {
		if (dt_print_packed(dtp, fp, data[i], total) < 0)
			return (-1);
	}

	(void) snprintf(c, sizeof (c), ">= %d", base + (levels * step));
	return (dt_printf(dtp, fp, ": %-8s | %lld\n", c, (long long)count));
}

int
dt_print_llquantize(dtrace_hdl_t *dtp, FILE *fp, const void *addr,
    size_t size, uint64_t normal)
{
	int i, first_bin, last_bin, bin = 1, order, levels;
	uint16_t factor, low, high, nsteps;
	const int64_t *data = addr;
	int64_t value = 1, next, step;
	char positives = 0, negatives = 0;
	long double total = 0;
	uint64_t arg;
	char c[32];

	if (size < sizeof (uint64_t))
		return (dt_set_errno(dtp, EDT_DMISMATCH));

	arg = *data++;
	size -= sizeof (uint64_t);

	factor = DTRACE_LLQUANTIZE_FACTOR(arg);
	low = DTRACE_LLQUANTIZE_LOW(arg);
	high = DTRACE_LLQUANTIZE_HIGH(arg);
	nsteps = DTRACE_LLQUANTIZE_NSTEP(arg);

	/*
	 * We don't expect to be handed invalid llquantize() parameters here,
	 * but sanity check them (to a degree) nonetheless.
	 */
	if (size > INT32_MAX || factor < 2 || low >= high ||
	    nsteps == 0 || factor > nsteps)
		return (dt_set_errno(dtp, EDT_DMISMATCH));

	levels = (int)size / sizeof (uint64_t);

	first_bin = 0;
	last_bin = levels - 1;

	while (first_bin < levels && data[first_bin] == 0)
		first_bin++;

	if (first_bin == levels) {
		first_bin = 0;
		last_bin = 1;
	} else {
		if (first_bin > 0)
			first_bin--;

		while (last_bin > 0 && data[last_bin] == 0)
			last_bin--;

		if (last_bin < levels - 1)
			last_bin++;
	}

	for (i = first_bin; i <= last_bin; i++) {
		positives |= (data[i] > 0);
		negatives |= (data[i] < 0);
		dt_quantize_total(dtp, data[i], &total);
	}

	if (dt_printf(dtp, fp, "\n%16s %41s %-9s\n", "value",
	    "------------- Distribution -------------", "count") < 0)
		return (-1);

	for (order = 0; order < low; order++)
		value *= factor;

	next = value * factor;
	step = next > nsteps ? next / nsteps : 1;

	if (first_bin == 0) {
		(void) snprintf(c, sizeof (c), "< %lld", (long long)value);

		if (dt_printf(dtp, fp, "%16s ", c) < 0)
			return (-1);

		if (dt_print_quantline(dtp, fp, data[0], normal,
		    total, positives, negatives) < 0)
			return (-1);
	}

	while (order <= high) {
		if (bin >= first_bin && bin <= last_bin) {
			if (dt_printf(dtp, fp, "%16lld ", (long long)value) < 0)
				return (-1);

			if (dt_print_quantline(dtp, fp, data[bin],
			    normal, total, positives, negatives) < 0)
				return (-1);
		}

		assert(value < next);
		bin++;

		if ((value += step) != next)
			continue;

		next = value * factor;
		step = next > nsteps ? next / nsteps : 1;
		order++;
	}

	if (last_bin < bin)
		return (0);

	assert(last_bin == bin);
	(void) snprintf(c, sizeof (c), ">= %lld", (long long)value);

	if (dt_printf(dtp, fp, "%16s ", c) < 0)
		return (-1);

	return (dt_print_quantline(dtp, fp, data[bin], normal,
	    total, positives, negatives));
}

/*ARGSUSED*/
static int
dt_print_average(dtrace_hdl_t *dtp, FILE *fp, caddr_t addr,
    size_t size, uint64_t normal)
{
	/* LINTED - alignment */
	int64_t *data = (int64_t *)addr;

	return (dt_printf(dtp, fp, " %16lld", data[0] ?
	    (long long)(data[1] / (int64_t)normal / data[0]) : 0));
}

/*ARGSUSED*/
static int
dt_print_stddev(dtrace_hdl_t *dtp, FILE *fp, caddr_t addr,
    size_t size, uint64_t normal)
{
	/* LINTED - alignment */
	uint64_t *data = (uint64_t *)addr;

	return (dt_printf(dtp, fp, " %16llu", data[0] ?
	    (unsigned long long) dt_stddev(data, normal) : 0));
}

/*ARGSUSED*/
static int
dt_print_bytes(dtrace_hdl_t *dtp, FILE *fp, caddr_t addr,
    size_t nbytes, int width, int quiet, int forceraw)
{
	/*
	 * If the byte stream is a series of printable characters, followed by
	 * a terminating byte, we print it out as a string.  Otherwise, we
	 * assume that it's something else and just print the bytes.
	 */
	int i, j, margin = 5;
	char *c = (char *)addr;

	if (nbytes == 0)
		return (0);

	if (forceraw)
		goto raw;

	if (dtp->dt_options[DTRACEOPT_RAWBYTES] != DTRACEOPT_UNSET)
		goto raw;

	for (i = 0; i < nbytes; i++) {
		/*
		 * We define a "printable character" to be one for which
		 * isprint(3C) returns non-zero, isspace(3C) returns non-zero,
		 * or a character which is either backspace or the bell.
		 * Backspace and the bell are regrettably special because
		 * they fail the first two tests -- and yet they are entirely
		 * printable.  These are the only two control characters that
		 * have meaning for the terminal and for which isprint(3C) and
		 * isspace(3C) return 0.
		 */
		if (isprint(c[i]) || isspace(c[i]) ||
		    c[i] == '\b' || c[i] == '\a')
			continue;

		if (c[i] == '\0' && i > 0) {
			/*
			 * This looks like it might be a string.  Before we
			 * assume that it is indeed a string, check the
			 * remainder of the byte range; if it contains
			 * additional non-nul characters, we'll assume that
			 * it's a binary stream that just happens to look like
			 * a string, and we'll print out the individual bytes.
			 */
			for (j = i + 1; j < nbytes; j++) {
				if (c[j] != '\0')
					break;
			}

			if (j != nbytes)
				break;

			if (quiet) {
				return (dt_printf(dtp, fp, "%s", c));
			} else {
				return (dt_printf(dtp, fp, " %s%*s",
				    width < 0 ? " " : "", width, c));
			}
		}

		break;
	}

	if (i == nbytes) {
		/*
		 * The byte range is all printable characters, but there is
		 * no trailing nul byte.  We'll assume that it's a string and
		 * print it as such.
		 */
		char *s = alloca(nbytes + 1);
		bcopy(c, s, nbytes);
		s[nbytes] = '\0';
		return (dt_printf(dtp, fp, " %-*s", width, s));
	}

raw:
	if (dt_printf(dtp, fp, "\n%*s      ", margin, "") < 0)
		return (-1);

	for (i = 0; i < 16; i++)
		if (dt_printf(dtp, fp, "  %c", "0123456789abcdef"[i]) < 0)
			return (-1);

	if (dt_printf(dtp, fp, "  0123456789abcdef\n") < 0)
		return (-1);


	for (i = 0; i < nbytes; i += 16) {
		if (dt_printf(dtp, fp, "%*s%5x:", margin, "", i) < 0)
			return (-1);

		for (j = i; j < i + 16 && j < nbytes; j++) {
			if (dt_printf(dtp, fp, " %02x", (uchar_t)c[j]) < 0)
				return (-1);
		}

		while (j++ % 16) {
			if (dt_printf(dtp, fp, "   ") < 0)
				return (-1);
		}

		if (dt_printf(dtp, fp, "  ") < 0)
			return (-1);

		for (j = i; j < i + 16 && j < nbytes; j++) {
			if (dt_printf(dtp, fp, "%c",
			    c[j] < ' ' || c[j] > '~' ? '.'
			    : c[j]) < 0)
				return (-1);
		}

		if (dt_printf(dtp, fp, "\n") < 0)
			return (-1);
	}

	return (0);
}

int
dt_print_stack(dtrace_hdl_t *dtp, FILE *fp, const char *format,
    caddr_t addr, int depth, int size)
{
	dtrace_syminfo_t dts;
	GElf_Sym sym;
	int i, indent;
	char c[PATH_MAX * 2];
	uint64_t pc;

	if (dt_printf(dtp, fp, "\n") < 0)
		return (-1);

	if (format == NULL)
		format = "%s";

	if (dtp->dt_options[DTRACEOPT_STACKINDENT] != DTRACEOPT_UNSET)
		indent = (int)dtp->dt_options[DTRACEOPT_STACKINDENT];
	else
		indent = _dtrace_stkindent;

	for (i = 0; i < depth; i++) {
		switch (size) {
		case sizeof (uint32_t):
			/* LINTED - alignment */
			pc = *((uint32_t *)addr);
			break;

		case sizeof (uint64_t):
			/* LINTED - alignment */
			pc = *((uint64_t *)addr);
			break;

		default:
			return (dt_set_errno(dtp, EDT_BADSTACKPC));
		}

		if (pc == 0)
			break;

		addr += size;

		if (dt_printf(dtp, fp, "%*s", indent, "") < 0)
			return (-1);

		if (dtrace_lookup_by_addr(dtp, pc, &sym, &dts) == 0) {
			if (pc > sym.st_value) {
				(void) snprintf(c, sizeof (c), "%s`%s+0x%llx",
				    dts.dts_object, dts.dts_name,
				    (u_longlong_t)(pc - sym.st_value));
			} else {
				(void) snprintf(c, sizeof (c), "%s`%s",
				    dts.dts_object, dts.dts_name);
			}
		} else {
			/*
			 * We'll repeat the lookup, but this time we'll specify
			 * a NULL GElf_Sym -- indicating that we're only
			 * interested in the containing module.
			 */
			if (dtrace_lookup_by_addr(dtp, pc, NULL, &dts) == 0) {
				(void) snprintf(c, sizeof (c), "%s`0x%llx",
				    dts.dts_object, (u_longlong_t)pc);
			} else {
				(void) snprintf(c, sizeof (c), "0x%llx",
				    (u_longlong_t)pc);
			}
		}

		if (dt_printf(dtp, fp, format, c) < 0)
			return (-1);

		if (dt_printf(dtp, fp, "\n") < 0)
			return (-1);
	}

	return (0);
}

int
dt_print_ustack(dtrace_hdl_t *dtp, FILE *fp, const char *format,
    caddr_t addr, uint64_t arg)
{
	/* LINTED - alignment */
	uint64_t *pc = (uint64_t *)addr;
	uint32_t depth = DTRACE_USTACK_NFRAMES(arg);
	uint32_t strsize = DTRACE_USTACK_STRSIZE(arg);
	const char *strbase = addr + (depth + 1) * sizeof (uint64_t);
	const char *str = strsize ? strbase : NULL;
	int err = 0;

	char name[PATH_MAX], objname[PATH_MAX], c[PATH_MAX * 2];
	struct ps_prochandle *P;
	GElf_Sym sym;
	int i, indent;
	pid_t pid;

	if (depth == 0)
		return (0);

	pid = (pid_t)*pc++;

	if (dt_printf(dtp, fp, "\n") < 0)
		return (-1);

	if (format == NULL)
		format = "%s";

	if (dtp->dt_options[DTRACEOPT_STACKINDENT] != DTRACEOPT_UNSET)
		indent = (int)dtp->dt_options[DTRACEOPT_STACKINDENT];
	else
		indent = _dtrace_stkindent;

	/*
	 * Ultimately, we need to add an entry point in the library vector for
	 * determining <symbol, offset> from <pid, address>.  For now, if
	 * this is a vector open, we just print the raw address or string.
	 */
	if (dtp->dt_vector == NULL)
		P = dt_proc_grab(dtp, pid, PGRAB_RDONLY | PGRAB_FORCE, 0);
	else
		P = NULL;

	if (P != NULL)
		dt_proc_lock(dtp, P); /* lock handle while we perform lookups */

	for (i = 0; i < depth && pc[i] != 0; i++) {
		const prmap_t *map;

		if ((err = dt_printf(dtp, fp, "%*s", indent, "")) < 0)
			break;

		if (P != NULL && Plookup_by_addr(P, pc[i],
		    name, sizeof (name), &sym) == 0) {
			(void) Pobjname(P, pc[i], objname, sizeof (objname));

			if (pc[i] > sym.st_value) {
				(void) snprintf(c, sizeof (c),
				    "%s`%s+0x%llx", dt_basename(objname), name,
				    (u_longlong_t)(pc[i] - sym.st_value));
			} else {
				(void) snprintf(c, sizeof (c),
				    "%s`%s", dt_basename(objname), name);
			}
		} else if (str != NULL && str[0] != '\0' && str[0] != '@' &&
		    (P != NULL && ((map = Paddr_to_map(P, pc[i])) == NULL ||
		    (map->pr_mflags & MA_WRITE)))) {
			/*
			 * If the current string pointer in the string table
			 * does not point to an empty string _and_ the program
			 * counter falls in a writable region, we'll use the
			 * string from the string table instead of the raw
			 * address.  This last condition is necessary because
			 * some (broken) ustack helpers will return a string
			 * even for a program counter that they can't
			 * identify.  If we have a string for a program
			 * counter that falls in a segment that isn't
			 * writable, we assume that we have fallen into this
			 * case and we refuse to use the string.
			 */
			(void) snprintf(c, sizeof (c), "%s", str);
		} else {
			if (P != NULL && Pobjname(P, pc[i], objname,
			    sizeof (objname)) != 0) {
				(void) snprintf(c, sizeof (c), "%s`0x%llx",
				    dt_basename(objname), (u_longlong_t)pc[i]);
			} else {
				(void) snprintf(c, sizeof (c), "0x%llx",
				    (u_longlong_t)pc[i]);
			}
		}

		if ((err = dt_printf(dtp, fp, format, c)) < 0)
			break;

		if ((err = dt_printf(dtp, fp, "\n")) < 0)
			break;

		if (str != NULL && str[0] == '@') {
			/*
			 * If the first character of the string is an "at" sign,
			 * then the string is inferred to be an annotation --
			 * and it is printed out beneath the frame and offset
			 * with brackets.
			 */
			if ((err = dt_printf(dtp, fp, "%*s", indent, "")) < 0)
				break;

			(void) snprintf(c, sizeof (c), " [ %s ]", &str[1]);

			if ((err = dt_printf(dtp, fp, format, c)) < 0)
				break;

			if ((err = dt_printf(dtp, fp, "\n")) < 0)
				break;
		}

		if (str != NULL) {
			str += strlen(str) + 1;
			if (str - strbase >= strsize)
				str = NULL;
		}
	}

	if (P != NULL) {
		dt_proc_unlock(dtp, P);
		dt_proc_release(dtp, P);
	}

	return (err);
}

static int
dt_print_usym(dtrace_hdl_t *dtp, FILE *fp, caddr_t addr, dtrace_actkind_t act)
{
	/* LINTED - alignment */
	uint64_t pid = ((uint64_t *)addr)[0];
	/* LINTED - alignment */
	uint64_t pc = ((uint64_t *)addr)[1];
	const char *format = " %-50s";
	char *s;
	int n, len = 256;

	if (act == DTRACEACT_USYM && dtp->dt_vector == NULL) {
		struct ps_prochandle *P;

		if ((P = dt_proc_grab(dtp, pid,
		    PGRAB_RDONLY | PGRAB_FORCE, 0)) != NULL) {
			GElf_Sym sym;

			dt_proc_lock(dtp, P);

			if (Plookup_by_addr(P, pc, NULL, 0, &sym) == 0)
				pc = sym.st_value;

			dt_proc_unlock(dtp, P);
			dt_proc_release(dtp, P);
		}
	}

	do {
		n = len;
		s = alloca(n);
	} while ((len = dtrace_uaddr2str(dtp, pid, pc, s, n)) > n);

	return (dt_printf(dtp, fp, format, s));
}

int
dt_print_umod(dtrace_hdl_t *dtp, FILE *fp, const char *format, caddr_t addr)
{
	/* LINTED - alignment */
	uint64_t pid = ((uint64_t *)addr)[0];
	/* LINTED - alignment */
	uint64_t pc = ((uint64_t *)addr)[1];
	int err = 0;

	char objname[PATH_MAX], c[PATH_MAX * 2];
	struct ps_prochandle *P;

	if (format == NULL)
		format = " %-50s";

	/*
	 * See the comment in dt_print_ustack() for the rationale for
	 * printing raw addresses in the vectored case.
	 */
	if (dtp->dt_vector == NULL)
		P = dt_proc_grab(dtp, pid, PGRAB_RDONLY | PGRAB_FORCE, 0);
	else
		P = NULL;

	if (P != NULL)
		dt_proc_lock(dtp, P); /* lock handle while we perform lookups */

	if (P != NULL && Pobjname(P, pc, objname, sizeof (objname)) != 0) {
		(void) snprintf(c, sizeof (c), "%s", dt_basename(objname));
	} else {
		(void) snprintf(c, sizeof (c), "0x%llx", (u_longlong_t)pc);
	}

	err = dt_printf(dtp, fp, format, c);

	if (P != NULL) {
		dt_proc_unlock(dtp, P);
		dt_proc_release(dtp, P);
	}

	return (err);
}

static int
dt_print_sym(dtrace_hdl_t *dtp, FILE *fp, const char *format, caddr_t addr)
{
	/* LINTED - alignment */
	uint64_t pc = *((uint64_t *)addr);
	dtrace_syminfo_t dts;
	GElf_Sym sym;
	char c[PATH_MAX * 2];

	if (format == NULL)
		format = " %-50s";

	if (dtrace_lookup_by_addr(dtp, pc, &sym, &dts) == 0) {
		(void) snprintf(c, sizeof (c), "%s`%s",
		    dts.dts_object, dts.dts_name);
	} else {
		/*
		 * We'll repeat the lookup, but this time we'll specify a
		 * NULL GElf_Sym -- indicating that we're only interested in
		 * the containing module.
		 */
		if (dtrace_lookup_by_addr(dtp, pc, NULL, &dts) == 0) {
			(void) snprintf(c, sizeof (c), "%s`0x%llx",
			    dts.dts_object, (u_longlong_t)pc);
		} else {
			(void) snprintf(c, sizeof (c), "0x%llx",
			    (u_longlong_t)pc);
		}
	}

	if (dt_printf(dtp, fp, format, c) < 0)
		return (-1);

	return (0);
}

int
dt_print_mod(dtrace_hdl_t *dtp, FILE *fp, const char *format, caddr_t addr)
{
	/* LINTED - alignment */
	uint64_t pc = *((uint64_t *)addr);
	dtrace_syminfo_t dts;
	char c[PATH_MAX * 2];

	if (format == NULL)
		format = " %-50s";

	if (dtrace_lookup_by_addr(dtp, pc, NULL, &dts) == 0) {
		(void) snprintf(c, sizeof (c), "%s", dts.dts_object);
	} else {
		(void) snprintf(c, sizeof (c), "0x%llx", (u_longlong_t)pc);
	}

	if (dt_printf(dtp, fp, format, c) < 0)
		return (-1);

	return (0);
}

static int
dt_print_memory(dtrace_hdl_t *dtp, FILE *fp, caddr_t addr)
{
	int quiet = (dtp->dt_options[DTRACEOPT_QUIET] != DTRACEOPT_UNSET);
	size_t nbytes = *((uintptr_t *) addr);

	return (dt_print_bytes(dtp, fp, addr + sizeof(uintptr_t),
	    nbytes, 50, quiet, 1));
}

typedef struct dt_normal {
	dtrace_aggvarid_t dtnd_id;
	uint64_t dtnd_normal;
} dt_normal_t;

static int
dt_normalize_agg(const dtrace_aggdata_t *aggdata, void *arg)
{
	dt_normal_t *normal = arg;
	dtrace_aggdesc_t *agg = aggdata->dtada_desc;
	dtrace_aggvarid_t id = normal->dtnd_id;

	if (agg->dtagd_nrecs == 0)
		return (DTRACE_AGGWALK_NEXT);

	if (agg->dtagd_varid != id)
		return (DTRACE_AGGWALK_NEXT);

	((dtrace_aggdata_t *)aggdata)->dtada_normal = normal->dtnd_normal;
	return (DTRACE_AGGWALK_NORMALIZE);
}

static int
dt_normalize(dtrace_hdl_t *dtp, caddr_t base, dtrace_recdesc_t *rec)
{
	dt_normal_t normal;
	caddr_t addr;

	/*
	 * We (should) have two records:  the aggregation ID followed by the
	 * normalization value.
	 */
	addr = base + rec->dtrd_offset;

	if (rec->dtrd_size != sizeof (dtrace_aggvarid_t))
		return (dt_set_errno(dtp, EDT_BADNORMAL));

	/* LINTED - alignment */
	normal.dtnd_id = *((dtrace_aggvarid_t *)addr);
	rec++;

	if (rec->dtrd_action != DTRACEACT_LIBACT)
		return (dt_set_errno(dtp, EDT_BADNORMAL));

	if (rec->dtrd_arg != DT_ACT_NORMALIZE)
		return (dt_set_errno(dtp, EDT_BADNORMAL));

	addr = base + rec->dtrd_offset;

	switch (rec->dtrd_size) {
	case sizeof (uint64_t):
		/* LINTED - alignment */
		normal.dtnd_normal = *((uint64_t *)addr);
		break;
	case sizeof (uint32_t):
		/* LINTED - alignment */
		normal.dtnd_normal = *((uint32_t *)addr);
		break;
	case sizeof (uint16_t):
		/* LINTED - alignment */
		normal.dtnd_normal = *((uint16_t *)addr);
		break;
	case sizeof (uint8_t):
		normal.dtnd_normal = *((uint8_t *)addr);
		break;
	default:
		return (dt_set_errno(dtp, EDT_BADNORMAL));
	}

	(void) dtrace_aggregate_walk(dtp, dt_normalize_agg, &normal);

	return (0);
}

static int
dt_denormalize_agg(const dtrace_aggdata_t *aggdata, void *arg)
{
	dtrace_aggdesc_t *agg = aggdata->dtada_desc;
	dtrace_aggvarid_t id = *((dtrace_aggvarid_t *)arg);

	if (agg->dtagd_nrecs == 0)
		return (DTRACE_AGGWALK_NEXT);

	if (agg->dtagd_varid != id)
		return (DTRACE_AGGWALK_NEXT);

	return (DTRACE_AGGWALK_DENORMALIZE);
}

static int
dt_clear_agg(const dtrace_aggdata_t *aggdata, void *arg)
{
	dtrace_aggdesc_t *agg = aggdata->dtada_desc;
	dtrace_aggvarid_t id = *((dtrace_aggvarid_t *)arg);

	if (agg->dtagd_nrecs == 0)
		return (DTRACE_AGGWALK_NEXT);

	if (agg->dtagd_varid != id)
		return (DTRACE_AGGWALK_NEXT);

	return (DTRACE_AGGWALK_CLEAR);
}

typedef struct dt_trunc {
	dtrace_aggvarid_t dttd_id;
	uint64_t dttd_remaining;
} dt_trunc_t;

static int
dt_trunc_agg(const dtrace_aggdata_t *aggdata, void *arg)
{
	dt_trunc_t *trunc = arg;
	dtrace_aggdesc_t *agg = aggdata->dtada_desc;
	dtrace_aggvarid_t id = trunc->dttd_id;

	if (agg->dtagd_nrecs == 0)
		return (DTRACE_AGGWALK_NEXT);

	if (agg->dtagd_varid != id)
		return (DTRACE_AGGWALK_NEXT);

	if (trunc->dttd_remaining == 0)
		return (DTRACE_AGGWALK_REMOVE);

	trunc->dttd_remaining--;
	return (DTRACE_AGGWALK_NEXT);
}

static int
dt_trunc(dtrace_hdl_t *dtp, caddr_t base, dtrace_recdesc_t *rec)
{
	dt_trunc_t trunc;
	caddr_t addr;
	int64_t remaining;
	int (*func)(dtrace_hdl_t *, dtrace_aggregate_f *, void *);

	/*
	 * We (should) have two records:  the aggregation ID followed by the
	 * number of aggregation entries after which the aggregation is to be
	 * truncated.
	 */
	addr = base + rec->dtrd_offset;

	if (rec->dtrd_size != sizeof (dtrace_aggvarid_t))
		return (dt_set_errno(dtp, EDT_BADTRUNC));

	/* LINTED - alignment */
	trunc.dttd_id = *((dtrace_aggvarid_t *)addr);
	rec++;

	if (rec->dtrd_action != DTRACEACT_LIBACT)
		return (dt_set_errno(dtp, EDT_BADTRUNC));

	if (rec->dtrd_arg != DT_ACT_TRUNC)
		return (dt_set_errno(dtp, EDT_BADTRUNC));

	addr = base + rec->dtrd_offset;

	switch (rec->dtrd_size) {
	case sizeof (uint64_t):
		/* LINTED - alignment */
		remaining = *((int64_t *)addr);
		break;
	case sizeof (uint32_t):
		/* LINTED - alignment */
		remaining = *((int32_t *)addr);
		break;
	case sizeof (uint16_t):
		/* LINTED - alignment */
		remaining = *((int16_t *)addr);
		break;
	case sizeof (uint8_t):
		remaining = *((int8_t *)addr);
		break;
	default:
		return (dt_set_errno(dtp, EDT_BADNORMAL));
	}

	if (remaining < 0) {
		func = dtrace_aggregate_walk_valsorted;
		remaining = -remaining;
	} else {
		func = dtrace_aggregate_walk_valrevsorted;
	}

	assert(remaining >= 0);
	trunc.dttd_remaining = remaining;

	(void) func(dtp, dt_trunc_agg, &trunc);

	return (0);
}

static int
dt_print_datum(dtrace_hdl_t *dtp, FILE *fp, dtrace_recdesc_t *rec,
    caddr_t addr, size_t size, const dtrace_aggdata_t *aggdata,
    uint64_t normal, dt_print_aggdata_t *pd)
{
	int err, width;
	dtrace_actkind_t act = rec->dtrd_action;
	boolean_t packed = pd->dtpa_agghist || pd->dtpa_aggpack;
	dtrace_aggdesc_t *agg = aggdata->dtada_desc;

	static struct {
		size_t size;
		int width;
		int packedwidth;
	} *fmt, fmttab[] = {
		{ sizeof (uint8_t), 3, 3 },
		{ sizeof (uint16_t), 5, 5 },
		{ sizeof (uint32_t), 8, 8 },
		{ sizeof (uint64_t), 16, 16 },
		{ 0, -50, 16 }
	};

	if (packed && pd->dtpa_agghisthdr != agg->dtagd_varid) {
		dtrace_recdesc_t *r;

		width = 0;

		/*
		 * To print our quantization header for either an agghist or
		 * aggpack aggregation, we need to iterate through all of our
		 * records to determine their width.
		 */
		for (r = rec; !DTRACEACT_ISAGG(r->dtrd_action); r++) {
			for (fmt = fmttab; fmt->size &&
			    fmt->size != r->dtrd_size; fmt++)
				continue;

			width += fmt->packedwidth + 1;
		}

		if (pd->dtpa_agghist) {
			if (dt_print_quanthdr(dtp, fp, width) < 0)
				return (-1);
		} else {
			if (dt_print_quanthdr_packed(dtp, fp,
			    width, aggdata, r->dtrd_action) < 0)
				return (-1);
		}

		pd->dtpa_agghisthdr = agg->dtagd_varid;
	}

	if (pd->dtpa_agghist && DTRACEACT_ISAGG(act)) {
		char positives = aggdata->dtada_flags & DTRACE_A_HASPOSITIVES;
		char negatives = aggdata->dtada_flags & DTRACE_A_HASNEGATIVES;
		int64_t val;

		assert(act == DTRACEAGG_SUM || act == DTRACEAGG_COUNT);
		val = (long long)*((uint64_t *)addr);

		if (dt_printf(dtp, fp, " ") < 0)
			return (-1);

		return (dt_print_quantline(dtp, fp, val, normal,
		    aggdata->dtada_total, positives, negatives));
	}

	if (pd->dtpa_aggpack && DTRACEACT_ISAGG(act)) {
		switch (act) {
		case DTRACEAGG_QUANTIZE:
			return (dt_print_quantize_packed(dtp,
			    fp, addr, size, aggdata));
		case DTRACEAGG_LQUANTIZE:
			return (dt_print_lquantize_packed(dtp,
			    fp, addr, size, aggdata));
		default:
			break;
		}
	}

	switch (act) {
	case DTRACEACT_STACK:
		return (dt_print_stack(dtp, fp, NULL, addr,
		    rec->dtrd_arg, rec->dtrd_size / rec->dtrd_arg));

	case DTRACEACT_USTACK:
	case DTRACEACT_JSTACK:
		return (dt_print_ustack(dtp, fp, NULL, addr, rec->dtrd_arg));

	case DTRACEACT_USYM:
	case DTRACEACT_UADDR:
		return (dt_print_usym(dtp, fp, addr, act));

	case DTRACEACT_UMOD:
		return (dt_print_umod(dtp, fp, NULL, addr));

	case DTRACEACT_SYM:
		return (dt_print_sym(dtp, fp, NULL, addr));

	case DTRACEACT_MOD:
		return (dt_print_mod(dtp, fp, NULL, addr));

	case DTRACEAGG_QUANTIZE:
		return (dt_print_quantize(dtp, fp, addr, size, normal));

	case DTRACEAGG_LQUANTIZE:
		return (dt_print_lquantize(dtp, fp, addr, size, normal));

	case DTRACEAGG_LLQUANTIZE:
		return (dt_print_llquantize(dtp, fp, addr, size, normal));

	case DTRACEAGG_AVG:
		return (dt_print_average(dtp, fp, addr, size, normal));

	case DTRACEAGG_STDDEV:
		return (dt_print_stddev(dtp, fp, addr, size, normal));

	default:
		break;
	}

	for (fmt = fmttab; fmt->size && fmt->size != size; fmt++)
		continue;

	width = packed ?
	    fmt->packedwidth : fmt->width;

	switch (size) {
	case sizeof (uint64_t):
		err = dt_printf(dtp, fp, " %*lld", width,
		    /* LINTED - alignment */
		    (long long)*((uint64_t *)addr) / normal);
		break;
	case sizeof (uint32_t):
		/* LINTED - alignment */
		err = dt_printf(dtp, fp, " %*d", width, *((uint32_t *)addr) /
		    (uint32_t)normal);
		break;
	case sizeof (uint16_t):
		/* LINTED - alignment */
		err = dt_printf(dtp, fp, " %*d", width, *((uint16_t *)addr) /
		    (uint32_t)normal);
		break;
	case sizeof (uint8_t):
		err = dt_printf(dtp, fp, " %*d", width, *((uint8_t *)addr) /
		    (uint32_t)normal);
		break;
	default:
		err = dt_print_bytes(dtp, fp, addr, size, width, 0, 0);
		break;
	}

	return (err);
}

int
dt_print_aggs(const dtrace_aggdata_t **aggsdata, int naggvars, void *arg)
{
	int i, aggact = 0;
	dt_print_aggdata_t *pd = arg;
	const dtrace_aggdata_t *aggdata = aggsdata[0];
	dtrace_aggdesc_t *agg = aggdata->dtada_desc;
	FILE *fp = pd->dtpa_fp;
	dtrace_hdl_t *dtp = pd->dtpa_dtp;
	dtrace_recdesc_t *rec;
	dtrace_actkind_t act;
	caddr_t addr;
	size_t size;

	pd->dtpa_agghist = (aggdata->dtada_flags & DTRACE_A_TOTAL);
	pd->dtpa_aggpack = (aggdata->dtada_flags & DTRACE_A_MINMAXBIN);

	/*
	 * Iterate over each record description in the key, printing the traced
	 * data, skipping the first datum (the tuple member created by the
	 * compiler).
	 */
	for (i = 1; i < agg->dtagd_nrecs; i++) {
		rec = &agg->dtagd_rec[i];
		act = rec->dtrd_action;
		addr = aggdata->dtada_data + rec->dtrd_offset;
		size = rec->dtrd_size;

		if (DTRACEACT_ISAGG(act)) {
			aggact = i;
			break;
		}

		if (dt_print_datum(dtp, fp, rec, addr,
		    size, aggdata, 1, pd) < 0)
			return (-1);

		if (dt_buffered_flush(dtp, NULL, rec, aggdata,
		    DTRACE_BUFDATA_AGGKEY) < 0)
			return (-1);
	}

	assert(aggact != 0);

	for (i = (naggvars == 1 ?
	    0 : 1); i < naggvars; i++) {
		uint64_t normal;

		aggdata = aggsdata[i];
		agg = aggdata->dtada_desc;
		rec = &agg->dtagd_rec[aggact];
		act = rec->dtrd_action;
		addr = aggdata->dtada_data + rec->dtrd_offset;
		size = rec->dtrd_size;

		assert(DTRACEACT_ISAGG(act));
		normal = aggdata->dtada_normal;

		if (dt_print_datum(dtp, fp, rec, addr,
		    size, aggdata, normal, pd) < 0)
			return (-1);

		if (dt_buffered_flush(dtp, NULL, rec, aggdata,
		    DTRACE_BUFDATA_AGGVAL) < 0)
			return (-1);

		if (!pd->dtpa_allunprint)
			agg->dtagd_flags |= DTRACE_AGD_PRINTED;
	}

	if (!pd->dtpa_agghist && !pd->dtpa_aggpack) {
		if (dt_printf(dtp, fp, "\n") < 0)
			return (-1);
	}

	if (dt_buffered_flush(dtp, NULL, NULL, aggdata,
	    DTRACE_BUFDATA_AGGFORMAT | DTRACE_BUFDATA_AGGLAST) < 0)
		return (-1);

	return (0);
}

int
dt_print_agg(const dtrace_aggdata_t *aggdata, void *arg)
{
	dt_print_aggdata_t *pd = arg;
	dtrace_aggdesc_t *agg = aggdata->dtada_desc;
	dtrace_aggvarid_t aggvarid = pd->dtpa_id;

	if (pd->dtpa_allunprint) {
		if (agg->dtagd_flags & DTRACE_AGD_PRINTED)
			return (0);
	} else {
		/*
		 * If we're not printing all unprinted aggregations, then the
		 * aggregation variable ID denotes a specific aggregation
		 * variable that we should print -- skip any other aggregations
		 * that we encounter.
		 */
		if (agg->dtagd_nrecs == 0)
			return (0);

		if (aggvarid != agg->dtagd_varid)
			return (0);
	}

	return (dt_print_aggs(&aggdata, 1, arg));
}

int
dt_setopt(dtrace_hdl_t *dtp, const dtrace_probedata_t *data,
    const char *option, const char *value)
{
	int len, rval;
	char *msg;
	const char *errstr;
	dtrace_setoptdata_t optdata;

	bzero(&optdata, sizeof (optdata));
	(void) dtrace_getopt(dtp, option, &optdata.dtsda_oldval);

	if (dtrace_setopt(dtp, option, value) == 0) {
		(void) dtrace_getopt(dtp, option, &optdata.dtsda_newval);
		optdata.dtsda_probe = data;
		optdata.dtsda_option = option;
		optdata.dtsda_handle = dtp;

		if ((rval = dt_handle_setopt(dtp, &optdata)) != 0)
			return (rval);

		return (0);
	}

	errstr = dtrace_errmsg(dtp, dtrace_errno(dtp));
	len = strlen(option) + strlen(value) + strlen(errstr) + 80;
	msg = alloca(len);

	(void) snprintf(msg, len, "couldn't set option \"%s\" to \"%s\": %s\n",
	    option, value, errstr);

	if ((rval = dt_handle_liberr(dtp, data, msg)) == 0)
		return (0);

	return (rval);
}

static int
dt_consume_cpu(dtrace_hdl_t *dtp, FILE *fp, int cpu,
    dtrace_bufdesc_t *buf, boolean_t just_one,
    dtrace_consume_probe_f *efunc, dtrace_consume_rec_f *rfunc, void *arg)
{
	dtrace_epid_t id;
	size_t offs;
	int flow = (dtp->dt_options[DTRACEOPT_FLOWINDENT] != DTRACEOPT_UNSET);
	int quiet = (dtp->dt_options[DTRACEOPT_QUIET] != DTRACEOPT_UNSET);
	int rval, i, n;
	uint64_t tracememsize = 0;
	dtrace_probedata_t data;
	uint64_t drops;

	bzero(&data, sizeof (data));
	data.dtpda_handle = dtp;
	data.dtpda_cpu = cpu;
	data.dtpda_flow = dtp->dt_flow;
	data.dtpda_indent = dtp->dt_indent;
	data.dtpda_prefix = dtp->dt_prefix;

	for (offs = buf->dtbd_oldest; offs < buf->dtbd_size; ) {
		dtrace_eprobedesc_t *epd;

		/*
		 * We're
guaranteed to have an ID. 2128 */ 2129 id = *(uint32_t *)((uintptr_t)buf->dtbd_data + offs); 2130 2131 if (id == DTRACE_EPIDNONE) { 2132 /* 2133 * This is filler to assure proper alignment of the 2134 * next record; we simply ignore it. 2135 */ 2136 offs += sizeof (id); 2137 continue; 2138 } 2139 2140 if ((rval = dt_epid_lookup(dtp, id, &data.dtpda_edesc, 2141 &data.dtpda_pdesc)) != 0) 2142 return (rval); 2143 2144 epd = data.dtpda_edesc; 2145 data.dtpda_data = buf->dtbd_data + offs; 2146 2147 if (data.dtpda_edesc->dtepd_uarg != DT_ECB_DEFAULT) { 2148 rval = dt_handle(dtp, &data); 2149 2150 if (rval == DTRACE_CONSUME_NEXT) 2151 goto nextepid; 2152 2153 if (rval == DTRACE_CONSUME_ERROR) 2154 return (-1); 2155 } 2156 2157 if (flow) 2158 (void) dt_flowindent(dtp, &data, dtp->dt_last_epid, 2159 buf, offs); 2160 2161 rval = (*efunc)(&data, arg); 2162 2163 if (flow) { 2164 if (data.dtpda_flow == DTRACEFLOW_ENTRY) 2165 data.dtpda_indent += 2; 2166 } 2167 2168 if (rval == DTRACE_CONSUME_NEXT) 2169 goto nextepid; 2170 2171 if (rval == DTRACE_CONSUME_ABORT) 2172 return (dt_set_errno(dtp, EDT_DIRABORT)); 2173 2174 if (rval != DTRACE_CONSUME_THIS) 2175 return (dt_set_errno(dtp, EDT_BADRVAL)); 2176 2177 for (i = 0; i < epd->dtepd_nrecs; i++) { 2178 caddr_t addr; 2179 dtrace_recdesc_t *rec = &epd->dtepd_rec[i]; 2180 dtrace_actkind_t act = rec->dtrd_action; 2181 2182 data.dtpda_data = buf->dtbd_data + offs + 2183 rec->dtrd_offset; 2184 addr = data.dtpda_data; 2185 2186 if (act == DTRACEACT_LIBACT) { 2187 uint64_t arg = rec->dtrd_arg; 2188 dtrace_aggvarid_t id; 2189 2190 switch (arg) { 2191 case DT_ACT_CLEAR: 2192 /* LINTED - alignment */ 2193 id = *((dtrace_aggvarid_t *)addr); 2194 (void) dtrace_aggregate_walk(dtp, 2195 dt_clear_agg, &id); 2196 continue; 2197 2198 case DT_ACT_DENORMALIZE: 2199 /* LINTED - alignment */ 2200 id = *((dtrace_aggvarid_t *)addr); 2201 (void) dtrace_aggregate_walk(dtp, 2202 dt_denormalize_agg, &id); 2203 continue; 2204 2205 case DT_ACT_FTRUNCATE: 2206 if (fp == NULL) 2207 continue; 2208 2209 (void) fflush(fp); 2210 (void) ftruncate(fileno(fp), 0); 2211 (void) fseeko(fp, 0, SEEK_SET); 2212 continue; 2213 2214 case DT_ACT_NORMALIZE: 2215 if (i == epd->dtepd_nrecs - 1) 2216 return (dt_set_errno(dtp, 2217 EDT_BADNORMAL)); 2218 2219 if (dt_normalize(dtp, 2220 buf->dtbd_data + offs, rec) != 0) 2221 return (-1); 2222 2223 i++; 2224 continue; 2225 2226 case DT_ACT_SETOPT: { 2227 uint64_t *opts = dtp->dt_options; 2228 dtrace_recdesc_t *valrec; 2229 uint32_t valsize; 2230 caddr_t val; 2231 int rv; 2232 2233 if (i == epd->dtepd_nrecs - 1) { 2234 return (dt_set_errno(dtp, 2235 EDT_BADSETOPT)); 2236 } 2237 2238 valrec = &epd->dtepd_rec[++i]; 2239 valsize = valrec->dtrd_size; 2240 2241 if (valrec->dtrd_action != act || 2242 valrec->dtrd_arg != arg) { 2243 return (dt_set_errno(dtp, 2244 EDT_BADSETOPT)); 2245 } 2246 2247 if (valsize > sizeof (uint64_t)) { 2248 val = buf->dtbd_data + offs + 2249 valrec->dtrd_offset; 2250 } else { 2251 val = "1"; 2252 } 2253 2254 rv = dt_setopt(dtp, &data, addr, val); 2255 2256 if (rv != 0) 2257 return (-1); 2258 2259 flow = (opts[DTRACEOPT_FLOWINDENT] != 2260 DTRACEOPT_UNSET); 2261 quiet = (opts[DTRACEOPT_QUIET] != 2262 DTRACEOPT_UNSET); 2263 2264 continue; 2265 } 2266 2267 case DT_ACT_TRUNC: 2268 if (i == epd->dtepd_nrecs - 1) 2269 return (dt_set_errno(dtp, 2270 EDT_BADTRUNC)); 2271 2272 if (dt_trunc(dtp, 2273 buf->dtbd_data + offs, rec) != 0) 2274 return (-1); 2275 2276 i++; 2277 continue; 2278 2279 default: 2280 continue; 2281 } 2282 } 2283 2284 if (act == 
DTRACEACT_TRACEMEM_DYNSIZE && 2285 rec->dtrd_size == sizeof (uint64_t)) { 2286 /* LINTED - alignment */ 2287 tracememsize = *((unsigned long long *)addr); 2288 continue; 2289 } 2290 2291 rval = (*rfunc)(&data, rec, arg); 2292 2293 if (rval == DTRACE_CONSUME_NEXT) 2294 continue; 2295 2296 if (rval == DTRACE_CONSUME_ABORT) 2297 return (dt_set_errno(dtp, EDT_DIRABORT)); 2298 2299 if (rval != DTRACE_CONSUME_THIS) 2300 return (dt_set_errno(dtp, EDT_BADRVAL)); 2301 2302 if (act == DTRACEACT_STACK) { 2303 int depth = rec->dtrd_arg; 2304 2305 if (dt_print_stack(dtp, fp, NULL, addr, depth, 2306 rec->dtrd_size / depth) < 0) 2307 return (-1); 2308 goto nextrec; 2309 } 2310 2311 if (act == DTRACEACT_USTACK || 2312 act == DTRACEACT_JSTACK) { 2313 if (dt_print_ustack(dtp, fp, NULL, 2314 addr, rec->dtrd_arg) < 0) 2315 return (-1); 2316 goto nextrec; 2317 } 2318 2319 if (act == DTRACEACT_SYM) { 2320 if (dt_print_sym(dtp, fp, NULL, addr) < 0) 2321 return (-1); 2322 goto nextrec; 2323 } 2324 2325 if (act == DTRACEACT_MOD) { 2326 if (dt_print_mod(dtp, fp, NULL, addr) < 0) 2327 return (-1); 2328 goto nextrec; 2329 } 2330 2331 if (act == DTRACEACT_USYM || act == DTRACEACT_UADDR) { 2332 if (dt_print_usym(dtp, fp, addr, act) < 0) 2333 return (-1); 2334 goto nextrec; 2335 } 2336 2337 if (act == DTRACEACT_UMOD) { 2338 if (dt_print_umod(dtp, fp, NULL, addr) < 0) 2339 return (-1); 2340 goto nextrec; 2341 } 2342 2343 if (act == DTRACEACT_PRINTM) { 2344 if (dt_print_memory(dtp, fp, addr) < 0) 2345 return (-1); 2346 goto nextrec; 2347 } 2348 2349 if (DTRACEACT_ISPRINTFLIKE(act)) { 2350 void *fmtdata; 2351 int (*func)(dtrace_hdl_t *, FILE *, void *, 2352 const dtrace_probedata_t *, 2353 const dtrace_recdesc_t *, uint_t, 2354 const void *buf, size_t); 2355 2356 if ((fmtdata = dt_format_lookup(dtp, 2357 rec->dtrd_format)) == NULL) 2358 goto nofmt; 2359 2360 switch (act) { 2361 case DTRACEACT_PRINTF: 2362 func = dtrace_fprintf; 2363 break; 2364 case DTRACEACT_PRINTA: 2365 func = dtrace_fprinta; 2366 break; 2367 case DTRACEACT_SYSTEM: 2368 func = dtrace_system; 2369 break; 2370 case DTRACEACT_FREOPEN: 2371 func = dtrace_freopen; 2372 break; 2373 } 2374 2375 n = (*func)(dtp, fp, fmtdata, &data, 2376 rec, epd->dtepd_nrecs - i, 2377 (uchar_t *)buf->dtbd_data + offs, 2378 buf->dtbd_size - offs); 2379 2380 if (n < 0) 2381 return (-1); /* errno is set for us */ 2382 2383 if (n > 0) 2384 i += n - 1; 2385 goto nextrec; 2386 } 2387 2388 /* 2389 * If this is a DIF expression, and the record has a 2390 * format set, this indicates we have a CTF type name 2391 * associated with the data and we should try to print 2392 * it out by type. 2393 */ 2394 if (act == DTRACEACT_DIFEXPR) { 2395 const char *strdata = dt_strdata_lookup(dtp, 2396 rec->dtrd_format); 2397 if (strdata != NULL) { 2398 n = dtrace_print(dtp, fp, strdata, 2399 addr, rec->dtrd_size); 2400 2401 /* 2402 * dtrace_print() will return -1 on 2403 * error, or return the number of bytes 2404 * consumed. It will return 0 if the 2405 * type couldn't be determined, and we 2406 * should fall through to the normal 2407 * trace method. 
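					 *
					 * For example, a record generated by
					 * the D print() action carries its
					 * expression's type name as the
					 * record's format string, so its data
					 * is rendered member-by-member by
					 * dtrace_print() rather than as raw
					 * bytes.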
2408 */ 2409 if (n < 0) 2410 return (-1); 2411 2412 if (n > 0) 2413 goto nextrec; 2414 } 2415 } 2416 2417 nofmt: 2418 if (act == DTRACEACT_PRINTA) { 2419 dt_print_aggdata_t pd; 2420 dtrace_aggvarid_t *aggvars; 2421 int j, naggvars = 0; 2422 size_t size = ((epd->dtepd_nrecs - i) * 2423 sizeof (dtrace_aggvarid_t)); 2424 2425 if ((aggvars = dt_alloc(dtp, size)) == NULL) 2426 return (-1); 2427 2428 /* 2429 * This might be a printa() with multiple 2430 * aggregation variables. We need to scan 2431 * forward through the records until we find 2432 * a record from a different statement. 2433 */ 2434 for (j = i; j < epd->dtepd_nrecs; j++) { 2435 dtrace_recdesc_t *nrec; 2436 caddr_t naddr; 2437 2438 nrec = &epd->dtepd_rec[j]; 2439 2440 if (nrec->dtrd_uarg != rec->dtrd_uarg) 2441 break; 2442 2443 if (nrec->dtrd_action != act) { 2444 return (dt_set_errno(dtp, 2445 EDT_BADAGG)); 2446 } 2447 2448 naddr = buf->dtbd_data + offs + 2449 nrec->dtrd_offset; 2450 2451 aggvars[naggvars++] = 2452 /* LINTED - alignment */ 2453 *((dtrace_aggvarid_t *)naddr); 2454 } 2455 2456 i = j - 1; 2457 bzero(&pd, sizeof (pd)); 2458 pd.dtpa_dtp = dtp; 2459 pd.dtpa_fp = fp; 2460 2461 assert(naggvars >= 1); 2462 2463 if (naggvars == 1) { 2464 pd.dtpa_id = aggvars[0]; 2465 dt_free(dtp, aggvars); 2466 2467 if (dt_printf(dtp, fp, "\n") < 0 || 2468 dtrace_aggregate_walk_sorted(dtp, 2469 dt_print_agg, &pd) < 0) 2470 return (-1); 2471 goto nextrec; 2472 } 2473 2474 if (dt_printf(dtp, fp, "\n") < 0 || 2475 dtrace_aggregate_walk_joined(dtp, aggvars, 2476 naggvars, dt_print_aggs, &pd) < 0) { 2477 dt_free(dtp, aggvars); 2478 return (-1); 2479 } 2480 2481 dt_free(dtp, aggvars); 2482 goto nextrec; 2483 } 2484 2485 if (act == DTRACEACT_TRACEMEM) { 2486 if (tracememsize == 0 || 2487 tracememsize > rec->dtrd_size) { 2488 tracememsize = rec->dtrd_size; 2489 } 2490 2491 n = dt_print_bytes(dtp, fp, addr, 2492 tracememsize, -33, quiet, 1); 2493 2494 tracememsize = 0; 2495 2496 if (n < 0) 2497 return (-1); 2498 2499 goto nextrec; 2500 } 2501 2502 switch (rec->dtrd_size) { 2503 case sizeof (uint64_t): 2504 n = dt_printf(dtp, fp, 2505 quiet ? "%lld" : " %16lld", 2506 /* LINTED - alignment */ 2507 *((unsigned long long *)addr)); 2508 break; 2509 case sizeof (uint32_t): 2510 n = dt_printf(dtp, fp, quiet ? "%d" : " %8d", 2511 /* LINTED - alignment */ 2512 *((uint32_t *)addr)); 2513 break; 2514 case sizeof (uint16_t): 2515 n = dt_printf(dtp, fp, quiet ? "%d" : " %5d", 2516 /* LINTED - alignment */ 2517 *((uint16_t *)addr)); 2518 break; 2519 case sizeof (uint8_t): 2520 n = dt_printf(dtp, fp, quiet ? "%d" : " %3d", 2521 *((uint8_t *)addr)); 2522 break; 2523 default: 2524 n = dt_print_bytes(dtp, fp, addr, 2525 rec->dtrd_size, -33, quiet, 0); 2526 break; 2527 } 2528 2529 if (n < 0) 2530 return (-1); /* errno is set for us */ 2531 2532 nextrec: 2533 if (dt_buffered_flush(dtp, &data, rec, NULL, 0) < 0) 2534 return (-1); /* errno is set for us */ 2535 } 2536 2537 /* 2538 * Call the record callback with a NULL record to indicate 2539 * that we're done processing this EPID. 2540 */ 2541 rval = (*rfunc)(&data, NULL, arg); 2542 nextepid: 2543 offs += epd->dtepd_size; 2544 dtp->dt_last_epid = id; 2545 if (just_one) { 2546 buf->dtbd_oldest = offs; 2547 break; 2548 } 2549 } 2550 2551 dtp->dt_flow = data.dtpda_flow; 2552 dtp->dt_indent = data.dtpda_indent; 2553 dtp->dt_prefix = data.dtpda_prefix; 2554 2555 if ((drops = buf->dtbd_drops) == 0) 2556 return (0); 2557 2558 /* 2559 * Explicitly zero the drops to prevent us from processing them again. 
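	 * The saved count is then reported exactly once, via the call to
	 * dt_handle_cpudrop() below.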
2560 */ 2561 buf->dtbd_drops = 0; 2562 2563 return (dt_handle_cpudrop(dtp, cpu, DTRACEDROP_PRINCIPAL, drops)); 2564 } 2565 2566 /* 2567 * Reduce memory usage by shrinking the buffer if it's no more than half full. 2568 * Note, we need to preserve the alignment of the data at dtbd_oldest, which is 2569 * only 4-byte aligned. 2570 */ 2571 static void 2572 dt_realloc_buf(dtrace_hdl_t *dtp, dtrace_bufdesc_t *buf, int cursize) 2573 { 2574 uint64_t used = buf->dtbd_size - buf->dtbd_oldest; 2575 if (used < cursize / 2) { 2576 int misalign = buf->dtbd_oldest & (sizeof (uint64_t) - 1); 2577 char *newdata = dt_alloc(dtp, used + misalign); 2578 if (newdata == NULL) 2579 return; 2580 bzero(newdata, misalign); 2581 bcopy(buf->dtbd_data + buf->dtbd_oldest, 2582 newdata + misalign, used); 2583 dt_free(dtp, buf->dtbd_data); 2584 buf->dtbd_oldest = misalign; 2585 buf->dtbd_size = used + misalign; 2586 buf->dtbd_data = newdata; 2587 } 2588 } 2589 2590 /* 2591 * If the ring buffer has wrapped, the data is not in order. Rearrange it 2592 * so that it is. Note, we need to preserve the alignment of the data at 2593 * dtbd_oldest, which is only 4-byte aligned. 2594 */ 2595 static int 2596 dt_unring_buf(dtrace_hdl_t *dtp, dtrace_bufdesc_t *buf) 2597 { 2598 int misalign; 2599 char *newdata, *ndp; 2600 2601 if (buf->dtbd_oldest == 0) 2602 return (0); 2603 2604 misalign = buf->dtbd_oldest & (sizeof (uint64_t) - 1); 2605 newdata = ndp = dt_alloc(dtp, buf->dtbd_size + misalign); 2606 2607 if (newdata == NULL) 2608 return (-1); 2609 2610 assert(0 == (buf->dtbd_size & (sizeof (uint64_t) - 1))); 2611 2612 bzero(ndp, misalign); 2613 ndp += misalign; 2614 2615 bcopy(buf->dtbd_data + buf->dtbd_oldest, ndp, 2616 buf->dtbd_size - buf->dtbd_oldest); 2617 ndp += buf->dtbd_size - buf->dtbd_oldest; 2618 2619 bcopy(buf->dtbd_data, ndp, buf->dtbd_oldest); 2620 2621 dt_free(dtp, buf->dtbd_data); 2622 buf->dtbd_oldest = 0; 2623 buf->dtbd_data = newdata; 2624 buf->dtbd_size += misalign; 2625 2626 return (0); 2627 } 2628 2629 static void 2630 dt_put_buf(dtrace_hdl_t *dtp, dtrace_bufdesc_t *buf) 2631 { 2632 dt_free(dtp, buf->dtbd_data); 2633 dt_free(dtp, buf); 2634 } 2635 2636 /* 2637 * Returns 0 on success, in which case *cbp will be filled in if we retrieved 2638 * data, or NULL if there is no data for this CPU. 2639 * Returns -1 on failure and sets dt_errno. 2640 */ 2641 static int 2642 dt_get_buf(dtrace_hdl_t *dtp, int cpu, dtrace_bufdesc_t **bufp) 2643 { 2644 dtrace_optval_t size; 2645 dtrace_bufdesc_t *buf = dt_zalloc(dtp, sizeof (*buf)); 2646 int error, rval; 2647 2648 if (buf == NULL) 2649 return (-1); 2650 2651 (void) dtrace_getopt(dtp, "bufsize", &size); 2652 buf->dtbd_data = dt_alloc(dtp, size); 2653 if (buf->dtbd_data == NULL) { 2654 dt_free(dtp, buf); 2655 return (-1); 2656 } 2657 buf->dtbd_size = size; 2658 buf->dtbd_cpu = cpu; 2659 2660 #ifdef illumos 2661 if (dt_ioctl(dtp, DTRACEIOC_BUFSNAP, buf) == -1) { 2662 #else 2663 if (dt_ioctl(dtp, DTRACEIOC_BUFSNAP, &buf) == -1) { 2664 #endif 2665 /* 2666 * If we failed with ENOENT, it may be because the 2667 * CPU was unconfigured -- this is okay. Any other 2668 * error, however, is unexpected. 
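		 * In the ENOENT case we return success with *bufp set to
		 * NULL, as described in the contract above this function.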
2669 */ 2670 if (errno == ENOENT) { 2671 *bufp = NULL; 2672 rval = 0; 2673 } else 2674 rval = dt_set_errno(dtp, errno); 2675 2676 dt_put_buf(dtp, buf); 2677 return (rval); 2678 } 2679 2680 error = dt_unring_buf(dtp, buf); 2681 if (error != 0) { 2682 dt_put_buf(dtp, buf); 2683 return (error); 2684 } 2685 dt_realloc_buf(dtp, buf, size); 2686 2687 *bufp = buf; 2688 return (0); 2689 } 2690 2691 typedef struct dt_begin { 2692 dtrace_consume_probe_f *dtbgn_probefunc; 2693 dtrace_consume_rec_f *dtbgn_recfunc; 2694 void *dtbgn_arg; 2695 dtrace_handle_err_f *dtbgn_errhdlr; 2696 void *dtbgn_errarg; 2697 int dtbgn_beginonly; 2698 } dt_begin_t; 2699 2700 static int 2701 dt_consume_begin_probe(const dtrace_probedata_t *data, void *arg) 2702 { 2703 dt_begin_t *begin = arg; 2704 dtrace_probedesc_t *pd = data->dtpda_pdesc; 2705 2706 int r1 = (strcmp(pd->dtpd_provider, "dtrace") == 0); 2707 int r2 = (strcmp(pd->dtpd_name, "BEGIN") == 0); 2708 2709 if (begin->dtbgn_beginonly) { 2710 if (!(r1 && r2)) 2711 return (DTRACE_CONSUME_NEXT); 2712 } else { 2713 if (r1 && r2) 2714 return (DTRACE_CONSUME_NEXT); 2715 } 2716 2717 /* 2718 * We have a record that we're interested in. Now call the underlying 2719 * probe function... 2720 */ 2721 return (begin->dtbgn_probefunc(data, begin->dtbgn_arg)); 2722 } 2723 2724 static int 2725 dt_consume_begin_record(const dtrace_probedata_t *data, 2726 const dtrace_recdesc_t *rec, void *arg) 2727 { 2728 dt_begin_t *begin = arg; 2729 2730 return (begin->dtbgn_recfunc(data, rec, begin->dtbgn_arg)); 2731 } 2732 2733 static int 2734 dt_consume_begin_error(const dtrace_errdata_t *data, void *arg) 2735 { 2736 dt_begin_t *begin = (dt_begin_t *)arg; 2737 dtrace_probedesc_t *pd = data->dteda_pdesc; 2738 2739 int r1 = (strcmp(pd->dtpd_provider, "dtrace") == 0); 2740 int r2 = (strcmp(pd->dtpd_name, "BEGIN") == 0); 2741 2742 if (begin->dtbgn_beginonly) { 2743 if (!(r1 && r2)) 2744 return (DTRACE_HANDLE_OK); 2745 } else { 2746 if (r1 && r2) 2747 return (DTRACE_HANDLE_OK); 2748 } 2749 2750 return (begin->dtbgn_errhdlr(data, begin->dtbgn_errarg)); 2751 } 2752 2753 static int 2754 dt_consume_begin(dtrace_hdl_t *dtp, FILE *fp, 2755 dtrace_consume_probe_f *pf, dtrace_consume_rec_f *rf, void *arg) 2756 { 2757 /* 2758 * There's this idea that the BEGIN probe should be processed before 2759 * everything else, and that the END probe should be processed after 2760 * anything else. In the common case, this is pretty easy to deal 2761 * with. However, a situation may arise where the BEGIN enabling and 2762 * END enabling are on the same CPU, and some enabling in the middle 2763 * occurred on a different CPU. To deal with this (blech!) we need to 2764 * consume the BEGIN buffer up until the end of the BEGIN probe, and 2765 * then set it aside. We will then process every other CPU, and then 2766 * we'll return to the BEGIN CPU and process the rest of the data 2767 * (which will inevitably include the END probe, if any). Making this 2768 * even more complicated (!) is the library's ERROR enabling. Because 2769 * this enabling is processed before we even get into the consume call 2770 * back, any ERROR firing would result in the library's ERROR enabling 2771 * being processed twice -- once in our first pass (for BEGIN probes), 2772 * and again in our second pass (for everything but BEGIN probes). 
To 2773 * deal with this, we interpose on the ERROR handler to assure that we 2774 * only process ERROR enablings induced by BEGIN enablings in the 2775 * first pass, and that we only process ERROR enablings _not_ induced 2776 * by BEGIN enablings in the second pass. 2777 */ 2778 2779 dt_begin_t begin; 2780 processorid_t cpu = dtp->dt_beganon; 2781 int rval, i; 2782 static int max_ncpus; 2783 dtrace_bufdesc_t *buf; 2784 2785 dtp->dt_beganon = -1; 2786 2787 if (dt_get_buf(dtp, cpu, &buf) != 0) 2788 return (-1); 2789 if (buf == NULL) 2790 return (0); 2791 2792 if (!dtp->dt_stopped || buf->dtbd_cpu != dtp->dt_endedon) { 2793 /* 2794 * This is the simple case. We're either not stopped, or if 2795 * we are, we actually processed any END probes on another 2796 * CPU. We can simply consume this buffer and return. 2797 */ 2798 rval = dt_consume_cpu(dtp, fp, cpu, buf, B_FALSE, 2799 pf, rf, arg); 2800 dt_put_buf(dtp, buf); 2801 return (rval); 2802 } 2803 2804 begin.dtbgn_probefunc = pf; 2805 begin.dtbgn_recfunc = rf; 2806 begin.dtbgn_arg = arg; 2807 begin.dtbgn_beginonly = 1; 2808 2809 /* 2810 * We need to interpose on the ERROR handler to be sure that we 2811 * only process ERRORs induced by BEGIN. 2812 */ 2813 begin.dtbgn_errhdlr = dtp->dt_errhdlr; 2814 begin.dtbgn_errarg = dtp->dt_errarg; 2815 dtp->dt_errhdlr = dt_consume_begin_error; 2816 dtp->dt_errarg = &begin; 2817 2818 rval = dt_consume_cpu(dtp, fp, cpu, buf, B_FALSE, 2819 dt_consume_begin_probe, dt_consume_begin_record, &begin); 2820 2821 dtp->dt_errhdlr = begin.dtbgn_errhdlr; 2822 dtp->dt_errarg = begin.dtbgn_errarg; 2823 2824 if (rval != 0) { 2825 dt_put_buf(dtp, buf); 2826 return (rval); 2827 } 2828 2829 if (max_ncpus == 0) 2830 max_ncpus = dt_sysconf(dtp, _SC_CPUID_MAX) + 1; 2831 2832 for (i = 0; i < max_ncpus; i++) { 2833 dtrace_bufdesc_t *nbuf; 2834 if (i == cpu) 2835 continue; 2836 2837 if (dt_get_buf(dtp, i, &nbuf) != 0) { 2838 dt_put_buf(dtp, buf); 2839 return (-1); 2840 } 2841 if (nbuf == NULL) 2842 continue; 2843 2844 rval = dt_consume_cpu(dtp, fp, i, nbuf, B_FALSE, 2845 pf, rf, arg); 2846 dt_put_buf(dtp, nbuf); 2847 if (rval != 0) { 2848 dt_put_buf(dtp, buf); 2849 return (rval); 2850 } 2851 } 2852 2853 /* 2854 * Okay -- we're done with the other buffers. Now we want to 2855 * reconsume the first buffer -- but this time we're looking for 2856 * everything _but_ BEGIN. And of course, in order to only consume 2857 * those ERRORs _not_ associated with BEGIN, we need to reinstall our 2858 * ERROR interposition function... 2859 */ 2860 begin.dtbgn_beginonly = 0; 2861 2862 assert(begin.dtbgn_errhdlr == dtp->dt_errhdlr); 2863 assert(begin.dtbgn_errarg == dtp->dt_errarg); 2864 dtp->dt_errhdlr = dt_consume_begin_error; 2865 dtp->dt_errarg = &begin; 2866 2867 rval = dt_consume_cpu(dtp, fp, cpu, buf, B_FALSE, 2868 dt_consume_begin_probe, dt_consume_begin_record, &begin); 2869 2870 dtp->dt_errhdlr = begin.dtbgn_errhdlr; 2871 dtp->dt_errarg = begin.dtbgn_errarg; 2872 2873 return (rval); 2874 } 2875 2876 /* ARGSUSED */ 2877 static uint64_t 2878 dt_buf_oldest(void *elem, void *arg) 2879 { 2880 dtrace_bufdesc_t *buf = elem; 2881 size_t offs = buf->dtbd_oldest; 2882 2883 while (offs < buf->dtbd_size) { 2884 dtrace_rechdr_t *dtrh = 2885 /* LINTED - alignment */ 2886 (dtrace_rechdr_t *)(buf->dtbd_data + offs); 2887 if (dtrh->dtrh_epid == DTRACE_EPIDNONE) { 2888 offs += sizeof (dtrace_epid_t); 2889 } else { 2890 return (DTRACE_RECORD_LOAD_TIMESTAMP(dtrh)); 2891 } 2892 } 2893 2894 /* There are no records left; use the time the buffer was retrieved. 
	 */
2895 	return (buf->dtbd_timestamp);
2896 }
2897 
2898 int
2899 dtrace_consume(dtrace_hdl_t *dtp, FILE *fp,
2900     dtrace_consume_probe_f *pf, dtrace_consume_rec_f *rf, void *arg)
2901 {
2902 	dtrace_optval_t size;
2903 	static int max_ncpus;
2904 	int i, rval;
2905 	dtrace_optval_t interval = dtp->dt_options[DTRACEOPT_SWITCHRATE];
2906 	hrtime_t now = gethrtime();
2907 
2908 	if (dtp->dt_lastswitch != 0) {
2909 		if (now - dtp->dt_lastswitch < interval)
2910 			return (0);
2911 
2912 		dtp->dt_lastswitch += interval;
2913 	} else {
2914 		dtp->dt_lastswitch = now;
2915 	}
2916 
2917 	if (!dtp->dt_active)
2918 		return (dt_set_errno(dtp, EINVAL));
2919 
2920 	if (max_ncpus == 0)
2921 		max_ncpus = dt_sysconf(dtp, _SC_CPUID_MAX) + 1;
2922 
2923 	if (pf == NULL)
2924 		pf = (dtrace_consume_probe_f *)dt_nullprobe;
2925 
2926 	if (rf == NULL)
2927 		rf = (dtrace_consume_rec_f *)dt_nullrec;
2928 
2929 	if (dtp->dt_options[DTRACEOPT_TEMPORAL] == DTRACEOPT_UNSET) {
2930 		/*
2931 		 * The output will not be in the order it was traced. Rather,
2932 		 * we will consume all of the data from each CPU's buffer in
2933 		 * turn. We apply special handling for the records from BEGIN
2934 		 * and END probes so that they are consumed first and last,
2935 		 * respectively.
2936 		 *
2937 		 * If we have just begun, we want to first process the CPU that
2938 		 * executed the BEGIN probe (if any).
2939 		 */
2940 		if (dtp->dt_active && dtp->dt_beganon != -1 &&
2941 		    (rval = dt_consume_begin(dtp, fp, pf, rf, arg)) != 0)
2942 			return (rval);
2943 
2944 		for (i = 0; i < max_ncpus; i++) {
2945 			dtrace_bufdesc_t *buf;
2946 
2947 			/*
2948 			 * If we have stopped, we want to process the CPU on
2949 			 * which the END probe was processed only _after_ we
2950 			 * have processed everything else.
2951 			 */
2952 			if (dtp->dt_stopped && (i == dtp->dt_endedon))
2953 				continue;
2954 
2955 			if (dt_get_buf(dtp, i, &buf) != 0)
2956 				return (-1);
2957 			if (buf == NULL)
2958 				continue;
2959 
2960 			dtp->dt_flow = 0;
2961 			dtp->dt_indent = 0;
2962 			dtp->dt_prefix = NULL;
2963 			rval = dt_consume_cpu(dtp, fp, i,
2964 			    buf, B_FALSE, pf, rf, arg);
2965 			dt_put_buf(dtp, buf);
2966 			if (rval != 0)
2967 				return (rval);
2968 		}
2969 		if (dtp->dt_stopped) {
2970 			dtrace_bufdesc_t *buf;
2971 
2972 			if (dt_get_buf(dtp, dtp->dt_endedon, &buf) != 0)
2973 				return (-1);
2974 			if (buf == NULL)
2975 				return (0);
2976 
2977 			rval = dt_consume_cpu(dtp, fp, dtp->dt_endedon,
2978 			    buf, B_FALSE, pf, rf, arg);
2979 			dt_put_buf(dtp, buf);
2980 			return (rval);
2981 		}
2982 	} else {
2983 		/*
2984 		 * The output will be in the order it was traced (or for
2985 		 * speculations, when it was committed). We retrieve a buffer
2986 		 * from each CPU and put it into a priority queue, which sorts
2987 		 * based on the first entry in the buffer. This is sufficient
2988 		 * because entries within a buffer are already sorted.
2989 		 *
2990 		 * We then consume records one at a time, always consuming the
2991 		 * oldest record, as determined by the priority queue. When
2992 		 * we reach the end of the time covered by these buffers,
2993 		 * we need to stop and retrieve more records on the next pass.
2994 		 * The kernel tells us the time covered by each buffer, in
2995 		 * dtbd_timestamp. The first buffer's timestamp tells us the
2996 		 * time covered by all buffers, as subsequently retrieved
2997 		 * buffers will cover to a more recent time.
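		 *
		 * As a purely illustrative example: if CPU 0's buffer holds
		 * records stamped t=10 and t=40 while CPU 1's holds t=20 and
		 * t=30, the queue pops CPU 0 first (oldest record, t=10), we
		 * consume just that one probe firing's records, re-insert the
		 * buffer, and subsequent pops yield t=20, t=30 and then t=40
		 * -- correctly interleaving the two CPUs.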
2998 		 */
2999 
3000 		uint64_t *drops = alloca(max_ncpus * sizeof (uint64_t));
3001 		uint64_t first_timestamp = 0;
3002 		uint_t cookie = 0;
3003 		dtrace_bufdesc_t *buf;
3004 
3005 		bzero(drops, max_ncpus * sizeof (uint64_t));
3006 
3007 		if (dtp->dt_bufq == NULL) {
3008 			dtp->dt_bufq = dt_pq_init(dtp, max_ncpus * 2,
3009 			    dt_buf_oldest, NULL);
3010 			if (dtp->dt_bufq == NULL) /* ENOMEM */
3011 				return (-1);
3012 		}
3013 
3014 		/* Retrieve data from each CPU. */
3015 		(void) dtrace_getopt(dtp, "bufsize", &size);
3016 		for (i = 0; i < max_ncpus; i++) {
3017 			dtrace_bufdesc_t *buf;
3018 
3019 			if (dt_get_buf(dtp, i, &buf) != 0)
3020 				return (-1);
3021 			if (buf != NULL) {
3022 				if (first_timestamp == 0)
3023 					first_timestamp = buf->dtbd_timestamp;
3024 				assert(buf->dtbd_timestamp >= first_timestamp);
3025 
3026 				dt_pq_insert(dtp->dt_bufq, buf);
3027 				drops[i] = buf->dtbd_drops;
3028 				buf->dtbd_drops = 0;
3029 			}
3030 		}
3031 
3032 		/* Consume records. */
3033 		for (;;) {
3034 			dtrace_bufdesc_t *buf = dt_pq_pop(dtp->dt_bufq);
3035 			uint64_t timestamp;
3036 
3037 			if (buf == NULL)
3038 				break;
3039 
3040 			timestamp = dt_buf_oldest(buf, dtp);
3041 			assert(timestamp >= dtp->dt_last_timestamp);
3042 			dtp->dt_last_timestamp = timestamp;
3043 
3044 			if (timestamp == buf->dtbd_timestamp) {
3045 				/*
3046 				 * We've reached the end of the time covered
3047 				 * by this buffer. If this is the oldest
3048 				 * buffer, we must do another pass
3049 				 * to retrieve more data.
3050 				 */
3051 				dt_put_buf(dtp, buf);
3052 				if (timestamp == first_timestamp &&
3053 				    !dtp->dt_stopped)
3054 					break;
3055 				continue;
3056 			}
3057 
3058 			if ((rval = dt_consume_cpu(dtp, fp,
3059 			    buf->dtbd_cpu, buf, B_TRUE, pf, rf, arg)) != 0)
3060 				return (rval);
3061 			dt_pq_insert(dtp->dt_bufq, buf);
3062 		}
3063 
3064 		/* Consume drops. */
3065 		for (i = 0; i < max_ncpus; i++) {
3066 			if (drops[i] != 0) {
3067 				int error = dt_handle_cpudrop(dtp, i,
3068 				    DTRACEDROP_PRINCIPAL, drops[i]);
3069 				if (error != 0)
3070 					return (error);
3071 			}
3072 		}
3073 
3074 		/*
3075 		 * Reduce memory usage by re-allocating smaller buffers
3076 		 * for the "remnants".
3077 		 */
3078 		while ((buf = dt_pq_walk(dtp->dt_bufq, &cookie)) != NULL)
3079 			dt_realloc_buf(dtp, buf, buf->dtbd_size);
3080 	}
3081 
3082 	return (0);
3083 }
3084 
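/*
 * An illustrative sketch of how a consumer is expected to drive
 * dtrace_consume(). This block is not part of the library proper: the
 * DT_CONSUME_EXAMPLE guard and the example_* names are hypothetical, and the
 * handle is assumed to have been opened and its enablings started with
 * dtrace_go() elsewhere. The callbacks return DTRACE_CONSUME_THIS to let
 * libdtrace process an enabled probe's records normally and
 * DTRACE_CONSUME_NEXT to skip them; the record callback is also invoked with
 * a NULL record once all records for an EPID have been handled. A real
 * consumer would additionally watch dtrace_status() and eventually call
 * dtrace_stop().
 */
#ifdef DT_CONSUME_EXAMPLE
/* ARGSUSED */
static int
example_probe(const dtrace_probedata_t *data, void *arg)
{
	return (DTRACE_CONSUME_THIS);
}

/* ARGSUSED */
static int
example_rec(const dtrace_probedata_t *data, const dtrace_recdesc_t *rec,
    void *arg)
{
	if (rec == NULL)
		return (DTRACE_CONSUME_NEXT);	/* done with this EPID */

	return (DTRACE_CONSUME_THIS);
}

static void
example_loop(dtrace_hdl_t *dtp)
{
	/*
	 * dtrace_consume() rate-limits itself according to the switchrate
	 * option (see above), so it is safe to call it from a simple
	 * sleep-and-consume loop.
	 */
	for (;;) {
		dtrace_sleep(dtp);

		if (dtrace_consume(dtp, stdout, example_probe,
		    example_rec, NULL) != 0)
			break;	/* dt_errno has been set for us */
	}
}
#endif	/* DT_CONSUME_EXAMPLE */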