1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright 2009 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 */ 25 26 /* 27 * Copyright (c) 2013, Joyent, Inc. All rights reserved. 28 * Copyright (c) 2012 by Delphix. All rights reserved. 29 */ 30 31 #include <stdlib.h> 32 #include <strings.h> 33 #include <errno.h> 34 #include <unistd.h> 35 #include <limits.h> 36 #include <assert.h> 37 #include <ctype.h> 38 #ifdef illumos 39 #include <alloca.h> 40 #endif 41 #include <dt_impl.h> 42 #include <dt_pq.h> 43 #ifndef illumos 44 #include <libproc_compat.h> 45 #endif 46 47 #define DT_MASK_LO 0x00000000FFFFFFFFULL 48 49 /* 50 * We declare this here because (1) we need it and (2) we want to avoid a 51 * dependency on libm in libdtrace. 52 */ 53 static long double 54 dt_fabsl(long double x) 55 { 56 if (x < 0) 57 return (-x); 58 59 return (x); 60 } 61 62 static int 63 dt_ndigits(long long val) 64 { 65 int rval = 1; 66 long long cmp = 10; 67 68 if (val < 0) { 69 val = val == INT64_MIN ? INT64_MAX : -val; 70 rval++; 71 } 72 73 while (val > cmp && cmp > 0) { 74 rval++; 75 cmp *= 10; 76 } 77 78 return (rval < 4 ? 4 : rval); 79 } 80 81 /* 82 * 128-bit arithmetic functions needed to support the stddev() aggregating 83 * action. 84 */ 85 static int 86 dt_gt_128(uint64_t *a, uint64_t *b) 87 { 88 return (a[1] > b[1] || (a[1] == b[1] && a[0] > b[0])); 89 } 90 91 static int 92 dt_ge_128(uint64_t *a, uint64_t *b) 93 { 94 return (a[1] > b[1] || (a[1] == b[1] && a[0] >= b[0])); 95 } 96 97 static int 98 dt_le_128(uint64_t *a, uint64_t *b) 99 { 100 return (a[1] < b[1] || (a[1] == b[1] && a[0] <= b[0])); 101 } 102 103 /* 104 * Shift the 128-bit value in a by b. If b is positive, shift left. 105 * If b is negative, shift right. 106 */ 107 static void 108 dt_shift_128(uint64_t *a, int b) 109 { 110 uint64_t mask; 111 112 if (b == 0) 113 return; 114 115 if (b < 0) { 116 b = -b; 117 if (b >= 64) { 118 a[0] = a[1] >> (b - 64); 119 a[1] = 0; 120 } else { 121 a[0] >>= b; 122 mask = 1LL << (64 - b); 123 mask -= 1; 124 a[0] |= ((a[1] & mask) << (64 - b)); 125 a[1] >>= b; 126 } 127 } else { 128 if (b >= 64) { 129 a[1] = a[0] << (b - 64); 130 a[0] = 0; 131 } else { 132 a[1] <<= b; 133 mask = a[0] >> (64 - b); 134 a[1] |= mask; 135 a[0] <<= b; 136 } 137 } 138 } 139 140 static int 141 dt_nbits_128(uint64_t *a) 142 { 143 int nbits = 0; 144 uint64_t tmp[2]; 145 uint64_t zero[2] = { 0, 0 }; 146 147 tmp[0] = a[0]; 148 tmp[1] = a[1]; 149 150 dt_shift_128(tmp, -1); 151 while (dt_gt_128(tmp, zero)) { 152 dt_shift_128(tmp, -1); 153 nbits++; 154 } 155 156 return (nbits); 157 } 158 159 static void 160 dt_subtract_128(uint64_t *minuend, uint64_t *subtrahend, uint64_t *difference) 161 { 162 uint64_t result[2]; 163 164 result[0] = minuend[0] - subtrahend[0]; 165 result[1] = minuend[1] - subtrahend[1] - 166 (minuend[0] < subtrahend[0] ? 1 : 0); 167 168 difference[0] = result[0]; 169 difference[1] = result[1]; 170 } 171 172 static void 173 dt_add_128(uint64_t *addend1, uint64_t *addend2, uint64_t *sum) 174 { 175 uint64_t result[2]; 176 177 result[0] = addend1[0] + addend2[0]; 178 result[1] = addend1[1] + addend2[1] + 179 (result[0] < addend1[0] || result[0] < addend2[0] ? 1 : 0); 180 181 sum[0] = result[0]; 182 sum[1] = result[1]; 183 } 184 185 /* 186 * The basic idea is to break the 2 64-bit values into 4 32-bit values, 187 * use native multiplication on those, and then re-combine into the 188 * resulting 128-bit value. 189 * 190 * (hi1 << 32 + lo1) * (hi2 << 32 + lo2) = 191 * hi1 * hi2 << 64 + 192 * hi1 * lo2 << 32 + 193 * hi2 * lo1 << 32 + 194 * lo1 * lo2 195 */ 196 static void 197 dt_multiply_128(uint64_t factor1, uint64_t factor2, uint64_t *product) 198 { 199 uint64_t hi1, hi2, lo1, lo2; 200 uint64_t tmp[2]; 201 202 hi1 = factor1 >> 32; 203 hi2 = factor2 >> 32; 204 205 lo1 = factor1 & DT_MASK_LO; 206 lo2 = factor2 & DT_MASK_LO; 207 208 product[0] = lo1 * lo2; 209 product[1] = hi1 * hi2; 210 211 tmp[0] = hi1 * lo2; 212 tmp[1] = 0; 213 dt_shift_128(tmp, 32); 214 dt_add_128(product, tmp, product); 215 216 tmp[0] = hi2 * lo1; 217 tmp[1] = 0; 218 dt_shift_128(tmp, 32); 219 dt_add_128(product, tmp, product); 220 } 221 222 /* 223 * This is long-hand division. 224 * 225 * We initialize subtrahend by shifting divisor left as far as possible. We 226 * loop, comparing subtrahend to dividend: if subtrahend is smaller, we 227 * subtract and set the appropriate bit in the result. We then shift 228 * subtrahend right by one bit for the next comparison. 229 */ 230 static void 231 dt_divide_128(uint64_t *dividend, uint64_t divisor, uint64_t *quotient) 232 { 233 uint64_t result[2] = { 0, 0 }; 234 uint64_t remainder[2]; 235 uint64_t subtrahend[2]; 236 uint64_t divisor_128[2]; 237 uint64_t mask[2] = { 1, 0 }; 238 int log = 0; 239 240 assert(divisor != 0); 241 242 divisor_128[0] = divisor; 243 divisor_128[1] = 0; 244 245 remainder[0] = dividend[0]; 246 remainder[1] = dividend[1]; 247 248 subtrahend[0] = divisor; 249 subtrahend[1] = 0; 250 251 while (divisor > 0) { 252 log++; 253 divisor >>= 1; 254 } 255 256 dt_shift_128(subtrahend, 128 - log); 257 dt_shift_128(mask, 128 - log); 258 259 while (dt_ge_128(remainder, divisor_128)) { 260 if (dt_ge_128(remainder, subtrahend)) { 261 dt_subtract_128(remainder, subtrahend, remainder); 262 result[0] |= mask[0]; 263 result[1] |= mask[1]; 264 } 265 266 dt_shift_128(subtrahend, -1); 267 dt_shift_128(mask, -1); 268 } 269 270 quotient[0] = result[0]; 271 quotient[1] = result[1]; 272 } 273 274 /* 275 * This is the long-hand method of calculating a square root. 276 * The algorithm is as follows: 277 * 278 * 1. Group the digits by 2 from the right. 279 * 2. Over the leftmost group, find the largest single-digit number 280 * whose square is less than that group. 281 * 3. Subtract the result of the previous step (2 or 4, depending) and 282 * bring down the next two-digit group. 283 * 4. For the result R we have so far, find the largest single-digit number 284 * x such that 2 * R * 10 * x + x^2 is less than the result from step 3. 285 * (Note that this is doubling R and performing a decimal left-shift by 1 286 * and searching for the appropriate decimal to fill the one's place.) 287 * The value x is the next digit in the square root. 288 * Repeat steps 3 and 4 until the desired precision is reached. (We're 289 * dealing with integers, so the above is sufficient.) 290 * 291 * In decimal, the square root of 582,734 would be calculated as so: 292 * 293 * __7__6__3 294 * | 58 27 34 295 * -49 (7^2 == 49 => 7 is the first digit in the square root) 296 * -- 297 * 9 27 (Subtract and bring down the next group.) 298 * 146 8 76 (2 * 7 * 10 * 6 + 6^2 == 876 => 6 is the next digit in 299 * ----- the square root) 300 * 51 34 (Subtract and bring down the next group.) 301 * 1523 45 69 (2 * 76 * 10 * 3 + 3^2 == 4569 => 3 is the next digit in 302 * ----- the square root) 303 * 5 65 (remainder) 304 * 305 * The above algorithm applies similarly in binary, but note that the 306 * only possible non-zero value for x in step 4 is 1, so step 4 becomes a 307 * simple decision: is 2 * R * 2 * 1 + 1^2 (aka R << 2 + 1) less than the 308 * preceding difference? 309 * 310 * In binary, the square root of 11011011 would be calculated as so: 311 * 312 * __1__1__1__0 313 * | 11 01 10 11 314 * 01 (0 << 2 + 1 == 1 < 11 => this bit is 1) 315 * -- 316 * 10 01 10 11 317 * 101 1 01 (1 << 2 + 1 == 101 < 1001 => next bit is 1) 318 * ----- 319 * 1 00 10 11 320 * 1101 11 01 (11 << 2 + 1 == 1101 < 10010 => next bit is 1) 321 * ------- 322 * 1 01 11 323 * 11101 1 11 01 (111 << 2 + 1 == 11101 > 10111 => last bit is 0) 324 * 325 */ 326 static uint64_t 327 dt_sqrt_128(uint64_t *square) 328 { 329 uint64_t result[2] = { 0, 0 }; 330 uint64_t diff[2] = { 0, 0 }; 331 uint64_t one[2] = { 1, 0 }; 332 uint64_t next_pair[2]; 333 uint64_t next_try[2]; 334 uint64_t bit_pairs, pair_shift; 335 int i; 336 337 bit_pairs = dt_nbits_128(square) / 2; 338 pair_shift = bit_pairs * 2; 339 340 for (i = 0; i <= bit_pairs; i++) { 341 /* 342 * Bring down the next pair of bits. 343 */ 344 next_pair[0] = square[0]; 345 next_pair[1] = square[1]; 346 dt_shift_128(next_pair, -pair_shift); 347 next_pair[0] &= 0x3; 348 next_pair[1] = 0; 349 350 dt_shift_128(diff, 2); 351 dt_add_128(diff, next_pair, diff); 352 353 /* 354 * next_try = R << 2 + 1 355 */ 356 next_try[0] = result[0]; 357 next_try[1] = result[1]; 358 dt_shift_128(next_try, 2); 359 dt_add_128(next_try, one, next_try); 360 361 if (dt_le_128(next_try, diff)) { 362 dt_subtract_128(diff, next_try, diff); 363 dt_shift_128(result, 1); 364 dt_add_128(result, one, result); 365 } else { 366 dt_shift_128(result, 1); 367 } 368 369 pair_shift -= 2; 370 } 371 372 assert(result[1] == 0); 373 374 return (result[0]); 375 } 376 377 uint64_t 378 dt_stddev(uint64_t *data, uint64_t normal) 379 { 380 uint64_t avg_of_squares[2]; 381 uint64_t square_of_avg[2]; 382 int64_t norm_avg; 383 uint64_t diff[2]; 384 385 if (data[0] == 0) 386 return (0); 387 388 /* 389 * The standard approximation for standard deviation is 390 * sqrt(average(x**2) - average(x)**2), i.e. the square root 391 * of the average of the squares minus the square of the average. 392 * When normalizing, we should divide the sum of x**2 by normal**2. 393 */ 394 dt_divide_128(data + 2, normal, avg_of_squares); 395 dt_divide_128(avg_of_squares, normal, avg_of_squares); 396 dt_divide_128(avg_of_squares, data[0], avg_of_squares); 397 398 norm_avg = (int64_t)data[1] / (int64_t)normal / (int64_t)data[0]; 399 400 if (norm_avg < 0) 401 norm_avg = -norm_avg; 402 403 dt_multiply_128((uint64_t)norm_avg, (uint64_t)norm_avg, square_of_avg); 404 405 dt_subtract_128(avg_of_squares, square_of_avg, diff); 406 407 return (dt_sqrt_128(diff)); 408 } 409 410 static int 411 dt_flowindent(dtrace_hdl_t *dtp, dtrace_probedata_t *data, dtrace_epid_t last, 412 dtrace_bufdesc_t *buf, size_t offs) 413 { 414 dtrace_probedesc_t *pd = data->dtpda_pdesc, *npd; 415 dtrace_eprobedesc_t *epd = data->dtpda_edesc, *nepd; 416 char *p = pd->dtpd_provider, *n = pd->dtpd_name, *sub; 417 dtrace_flowkind_t flow = DTRACEFLOW_NONE; 418 const char *str = NULL; 419 static const char *e_str[2] = { " -> ", " => " }; 420 static const char *r_str[2] = { " <- ", " <= " }; 421 static const char *ent = "entry", *ret = "return"; 422 static int entlen = 0, retlen = 0; 423 dtrace_epid_t next, id = epd->dtepd_epid; 424 int rval; 425 426 if (entlen == 0) { 427 assert(retlen == 0); 428 entlen = strlen(ent); 429 retlen = strlen(ret); 430 } 431 432 /* 433 * If the name of the probe is "entry" or ends with "-entry", we 434 * treat it as an entry; if it is "return" or ends with "-return", 435 * we treat it as a return. (This allows application-provided probes 436 * like "method-entry" or "function-entry" to participate in flow 437 * indentation -- without accidentally misinterpreting popular probe 438 * names like "carpentry", "gentry" or "Coventry".) 439 */ 440 if ((sub = strstr(n, ent)) != NULL && sub[entlen] == '\0' && 441 (sub == n || sub[-1] == '-')) { 442 flow = DTRACEFLOW_ENTRY; 443 str = e_str[strcmp(p, "syscall") == 0]; 444 } else if ((sub = strstr(n, ret)) != NULL && sub[retlen] == '\0' && 445 (sub == n || sub[-1] == '-')) { 446 flow = DTRACEFLOW_RETURN; 447 str = r_str[strcmp(p, "syscall") == 0]; 448 } 449 450 /* 451 * If we're going to indent this, we need to check the ID of our last 452 * call. If we're looking at the same probe ID but a different EPID, 453 * we _don't_ want to indent. (Yes, there are some minor holes in 454 * this scheme -- it's a heuristic.) 455 */ 456 if (flow == DTRACEFLOW_ENTRY) { 457 if ((last != DTRACE_EPIDNONE && id != last && 458 pd->dtpd_id == dtp->dt_pdesc[last]->dtpd_id)) 459 flow = DTRACEFLOW_NONE; 460 } 461 462 /* 463 * If we're going to unindent this, it's more difficult to see if 464 * we don't actually want to unindent it -- we need to look at the 465 * _next_ EPID. 466 */ 467 if (flow == DTRACEFLOW_RETURN) { 468 offs += epd->dtepd_size; 469 470 do { 471 if (offs >= buf->dtbd_size) 472 goto out; 473 474 next = *(uint32_t *)((uintptr_t)buf->dtbd_data + offs); 475 476 if (next == DTRACE_EPIDNONE) 477 offs += sizeof (id); 478 } while (next == DTRACE_EPIDNONE); 479 480 if ((rval = dt_epid_lookup(dtp, next, &nepd, &npd)) != 0) 481 return (rval); 482 483 if (next != id && npd->dtpd_id == pd->dtpd_id) 484 flow = DTRACEFLOW_NONE; 485 } 486 487 out: 488 if (flow == DTRACEFLOW_ENTRY || flow == DTRACEFLOW_RETURN) { 489 data->dtpda_prefix = str; 490 } else { 491 data->dtpda_prefix = "| "; 492 } 493 494 if (flow == DTRACEFLOW_RETURN && data->dtpda_indent > 0) 495 data->dtpda_indent -= 2; 496 497 data->dtpda_flow = flow; 498 499 return (0); 500 } 501 502 static int 503 dt_nullprobe() 504 { 505 return (DTRACE_CONSUME_THIS); 506 } 507 508 static int 509 dt_nullrec() 510 { 511 return (DTRACE_CONSUME_NEXT); 512 } 513 514 static void 515 dt_quantize_total(dtrace_hdl_t *dtp, int64_t datum, long double *total) 516 { 517 long double val = dt_fabsl((long double)datum); 518 519 if (dtp->dt_options[DTRACEOPT_AGGZOOM] == DTRACEOPT_UNSET) { 520 *total += val; 521 return; 522 } 523 524 /* 525 * If we're zooming in on an aggregation, we want the height of the 526 * highest value to be approximately 95% of total bar height -- so we 527 * adjust up by the reciprocal of DTRACE_AGGZOOM_MAX when comparing to 528 * our highest value. 529 */ 530 val *= 1 / DTRACE_AGGZOOM_MAX; 531 532 if (*total < val) 533 *total = val; 534 } 535 536 static int 537 dt_print_quanthdr(dtrace_hdl_t *dtp, FILE *fp, int width) 538 { 539 return (dt_printf(dtp, fp, "\n%*s %41s %-9s\n", 540 width ? width : 16, width ? "key" : "value", 541 "------------- Distribution -------------", "count")); 542 } 543 544 static int 545 dt_print_quanthdr_packed(dtrace_hdl_t *dtp, FILE *fp, int width, 546 const dtrace_aggdata_t *aggdata, dtrace_actkind_t action) 547 { 548 int min = aggdata->dtada_minbin, max = aggdata->dtada_maxbin; 549 int minwidth, maxwidth, i; 550 551 assert(action == DTRACEAGG_QUANTIZE || action == DTRACEAGG_LQUANTIZE); 552 553 if (action == DTRACEAGG_QUANTIZE) { 554 if (min != 0 && min != DTRACE_QUANTIZE_ZEROBUCKET) 555 min--; 556 557 if (max < DTRACE_QUANTIZE_NBUCKETS - 1) 558 max++; 559 560 minwidth = dt_ndigits(DTRACE_QUANTIZE_BUCKETVAL(min)); 561 maxwidth = dt_ndigits(DTRACE_QUANTIZE_BUCKETVAL(max)); 562 } else { 563 maxwidth = 8; 564 minwidth = maxwidth - 1; 565 max++; 566 } 567 568 if (dt_printf(dtp, fp, "\n%*s %*s .", 569 width, width > 0 ? "key" : "", minwidth, "min") < 0) 570 return (-1); 571 572 for (i = min; i <= max; i++) { 573 if (dt_printf(dtp, fp, "-") < 0) 574 return (-1); 575 } 576 577 return (dt_printf(dtp, fp, ". %*s | count\n", -maxwidth, "max")); 578 } 579 580 /* 581 * We use a subset of the Unicode Block Elements (U+2588 through U+258F, 582 * inclusive) to represent aggregations via UTF-8 -- which are expressed via 583 * 3-byte UTF-8 sequences. 584 */ 585 #define DTRACE_AGGUTF8_FULL 0x2588 586 #define DTRACE_AGGUTF8_BASE 0x258f 587 #define DTRACE_AGGUTF8_LEVELS 8 588 589 #define DTRACE_AGGUTF8_BYTE0(val) (0xe0 | ((val) >> 12)) 590 #define DTRACE_AGGUTF8_BYTE1(val) (0x80 | (((val) >> 6) & 0x3f)) 591 #define DTRACE_AGGUTF8_BYTE2(val) (0x80 | ((val) & 0x3f)) 592 593 static int 594 dt_print_quantline_utf8(dtrace_hdl_t *dtp, FILE *fp, int64_t val, 595 uint64_t normal, long double total) 596 { 597 uint_t len = 40, i, whole, partial; 598 long double f = (dt_fabsl((long double)val) * len) / total; 599 const char *spaces = " "; 600 601 whole = (uint_t)f; 602 partial = (uint_t)((f - (long double)(uint_t)f) * 603 (long double)DTRACE_AGGUTF8_LEVELS); 604 605 if (dt_printf(dtp, fp, "|") < 0) 606 return (-1); 607 608 for (i = 0; i < whole; i++) { 609 if (dt_printf(dtp, fp, "%c%c%c", 610 DTRACE_AGGUTF8_BYTE0(DTRACE_AGGUTF8_FULL), 611 DTRACE_AGGUTF8_BYTE1(DTRACE_AGGUTF8_FULL), 612 DTRACE_AGGUTF8_BYTE2(DTRACE_AGGUTF8_FULL)) < 0) 613 return (-1); 614 } 615 616 if (partial != 0) { 617 partial = DTRACE_AGGUTF8_BASE - (partial - 1); 618 619 if (dt_printf(dtp, fp, "%c%c%c", 620 DTRACE_AGGUTF8_BYTE0(partial), 621 DTRACE_AGGUTF8_BYTE1(partial), 622 DTRACE_AGGUTF8_BYTE2(partial)) < 0) 623 return (-1); 624 625 i++; 626 } 627 628 return (dt_printf(dtp, fp, "%s %-9lld\n", spaces + i, 629 (long long)val / normal)); 630 } 631 632 static int 633 dt_print_quantline(dtrace_hdl_t *dtp, FILE *fp, int64_t val, 634 uint64_t normal, long double total, char positives, char negatives) 635 { 636 long double f; 637 uint_t depth, len = 40; 638 639 const char *ats = "@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@"; 640 const char *spaces = " "; 641 642 assert(strlen(ats) == len && strlen(spaces) == len); 643 assert(!(total == 0 && (positives || negatives))); 644 assert(!(val < 0 && !negatives)); 645 assert(!(val > 0 && !positives)); 646 assert(!(val != 0 && total == 0)); 647 648 if (!negatives) { 649 if (positives) { 650 if (dtp->dt_encoding == DT_ENCODING_UTF8) { 651 return (dt_print_quantline_utf8(dtp, fp, val, 652 normal, total)); 653 } 654 655 f = (dt_fabsl((long double)val) * len) / total; 656 depth = (uint_t)(f + 0.5); 657 } else { 658 depth = 0; 659 } 660 661 return (dt_printf(dtp, fp, "|%s%s %-9lld\n", ats + len - depth, 662 spaces + depth, (long long)val / normal)); 663 } 664 665 if (!positives) { 666 f = (dt_fabsl((long double)val) * len) / total; 667 depth = (uint_t)(f + 0.5); 668 669 return (dt_printf(dtp, fp, "%s%s| %-9lld\n", spaces + depth, 670 ats + len - depth, (long long)val / normal)); 671 } 672 673 /* 674 * If we're here, we have both positive and negative bucket values. 675 * To express this graphically, we're going to generate both positive 676 * and negative bars separated by a centerline. These bars are half 677 * the size of normal quantize()/lquantize() bars, so we divide the 678 * length in half before calculating the bar length. 679 */ 680 len /= 2; 681 ats = &ats[len]; 682 spaces = &spaces[len]; 683 684 f = (dt_fabsl((long double)val) * len) / total; 685 depth = (uint_t)(f + 0.5); 686 687 if (val <= 0) { 688 return (dt_printf(dtp, fp, "%s%s|%*s %-9lld\n", spaces + depth, 689 ats + len - depth, len, "", (long long)val / normal)); 690 } else { 691 return (dt_printf(dtp, fp, "%20s|%s%s %-9lld\n", "", 692 ats + len - depth, spaces + depth, 693 (long long)val / normal)); 694 } 695 } 696 697 /* 698 * As with UTF-8 printing of aggregations, we use a subset of the Unicode 699 * Block Elements (U+2581 through U+2588, inclusive) to represent our packed 700 * aggregation. 701 */ 702 #define DTRACE_AGGPACK_BASE 0x2581 703 #define DTRACE_AGGPACK_LEVELS 8 704 705 static int 706 dt_print_packed(dtrace_hdl_t *dtp, FILE *fp, 707 long double datum, long double total) 708 { 709 static boolean_t utf8_checked = B_FALSE; 710 static boolean_t utf8; 711 char *ascii = "__xxxxXX"; 712 char *neg = "vvvvVV"; 713 unsigned int len; 714 long double val; 715 716 if (!utf8_checked) { 717 char *term; 718 719 /* 720 * We want to determine if we can reasonably emit UTF-8 for our 721 * packed aggregation. To do this, we will check for terminals 722 * that are known to be primitive to emit UTF-8 on these. 723 */ 724 utf8_checked = B_TRUE; 725 726 if (dtp->dt_encoding == DT_ENCODING_ASCII) { 727 utf8 = B_FALSE; 728 } else if (dtp->dt_encoding == DT_ENCODING_UTF8) { 729 utf8 = B_TRUE; 730 } else if ((term = getenv("TERM")) != NULL && 731 (strcmp(term, "sun") == 0 || 732 strcmp(term, "sun-color") == 0 || 733 strcmp(term, "dumb") == 0)) { 734 utf8 = B_FALSE; 735 } else { 736 utf8 = B_TRUE; 737 } 738 } 739 740 if (datum == 0) 741 return (dt_printf(dtp, fp, " ")); 742 743 if (datum < 0) { 744 len = strlen(neg); 745 val = dt_fabsl(datum * (len - 1)) / total; 746 return (dt_printf(dtp, fp, "%c", neg[(uint_t)(val + 0.5)])); 747 } 748 749 if (utf8) { 750 int block = DTRACE_AGGPACK_BASE + (unsigned int)(((datum * 751 (DTRACE_AGGPACK_LEVELS - 1)) / total) + 0.5); 752 753 return (dt_printf(dtp, fp, "%c%c%c", 754 DTRACE_AGGUTF8_BYTE0(block), 755 DTRACE_AGGUTF8_BYTE1(block), 756 DTRACE_AGGUTF8_BYTE2(block))); 757 } 758 759 len = strlen(ascii); 760 val = (datum * (len - 1)) / total; 761 return (dt_printf(dtp, fp, "%c", ascii[(uint_t)(val + 0.5)])); 762 } 763 764 int 765 dt_print_quantize(dtrace_hdl_t *dtp, FILE *fp, const void *addr, 766 size_t size, uint64_t normal) 767 { 768 const int64_t *data = addr; 769 int i, first_bin = 0, last_bin = DTRACE_QUANTIZE_NBUCKETS - 1; 770 long double total = 0; 771 char positives = 0, negatives = 0; 772 773 if (size != DTRACE_QUANTIZE_NBUCKETS * sizeof (uint64_t)) 774 return (dt_set_errno(dtp, EDT_DMISMATCH)); 775 776 while (first_bin < DTRACE_QUANTIZE_NBUCKETS - 1 && data[first_bin] == 0) 777 first_bin++; 778 779 if (first_bin == DTRACE_QUANTIZE_NBUCKETS - 1) { 780 /* 781 * There isn't any data. This is possible if the aggregation 782 * has been clear()'d or if negative increment values have been 783 * used. Regardless, we'll print the buckets around 0. 784 */ 785 first_bin = DTRACE_QUANTIZE_ZEROBUCKET - 1; 786 last_bin = DTRACE_QUANTIZE_ZEROBUCKET + 1; 787 } else { 788 if (first_bin > 0) 789 first_bin--; 790 791 while (last_bin > 0 && data[last_bin] == 0) 792 last_bin--; 793 794 if (last_bin < DTRACE_QUANTIZE_NBUCKETS - 1) 795 last_bin++; 796 } 797 798 for (i = first_bin; i <= last_bin; i++) { 799 positives |= (data[i] > 0); 800 negatives |= (data[i] < 0); 801 dt_quantize_total(dtp, data[i], &total); 802 } 803 804 if (dt_print_quanthdr(dtp, fp, 0) < 0) 805 return (-1); 806 807 for (i = first_bin; i <= last_bin; i++) { 808 if (dt_printf(dtp, fp, "%16lld ", 809 (long long)DTRACE_QUANTIZE_BUCKETVAL(i)) < 0) 810 return (-1); 811 812 if (dt_print_quantline(dtp, fp, data[i], normal, total, 813 positives, negatives) < 0) 814 return (-1); 815 } 816 817 return (0); 818 } 819 820 int 821 dt_print_quantize_packed(dtrace_hdl_t *dtp, FILE *fp, const void *addr, 822 size_t size, const dtrace_aggdata_t *aggdata) 823 { 824 const int64_t *data = addr; 825 long double total = 0, count = 0; 826 int min = aggdata->dtada_minbin, max = aggdata->dtada_maxbin, i; 827 int64_t minval, maxval; 828 829 if (size != DTRACE_QUANTIZE_NBUCKETS * sizeof (uint64_t)) 830 return (dt_set_errno(dtp, EDT_DMISMATCH)); 831 832 if (min != 0 && min != DTRACE_QUANTIZE_ZEROBUCKET) 833 min--; 834 835 if (max < DTRACE_QUANTIZE_NBUCKETS - 1) 836 max++; 837 838 minval = DTRACE_QUANTIZE_BUCKETVAL(min); 839 maxval = DTRACE_QUANTIZE_BUCKETVAL(max); 840 841 if (dt_printf(dtp, fp, " %*lld :", dt_ndigits(minval), 842 (long long)minval) < 0) 843 return (-1); 844 845 for (i = min; i <= max; i++) { 846 dt_quantize_total(dtp, data[i], &total); 847 count += data[i]; 848 } 849 850 for (i = min; i <= max; i++) { 851 if (dt_print_packed(dtp, fp, data[i], total) < 0) 852 return (-1); 853 } 854 855 if (dt_printf(dtp, fp, ": %*lld | %lld\n", 856 -dt_ndigits(maxval), (long long)maxval, (long long)count) < 0) 857 return (-1); 858 859 return (0); 860 } 861 862 int 863 dt_print_lquantize(dtrace_hdl_t *dtp, FILE *fp, const void *addr, 864 size_t size, uint64_t normal) 865 { 866 const int64_t *data = addr; 867 int i, first_bin, last_bin, base; 868 uint64_t arg; 869 long double total = 0; 870 uint16_t step, levels; 871 char positives = 0, negatives = 0; 872 873 if (size < sizeof (uint64_t)) 874 return (dt_set_errno(dtp, EDT_DMISMATCH)); 875 876 arg = *data++; 877 size -= sizeof (uint64_t); 878 879 base = DTRACE_LQUANTIZE_BASE(arg); 880 step = DTRACE_LQUANTIZE_STEP(arg); 881 levels = DTRACE_LQUANTIZE_LEVELS(arg); 882 883 first_bin = 0; 884 last_bin = levels + 1; 885 886 if (size != sizeof (uint64_t) * (levels + 2)) 887 return (dt_set_errno(dtp, EDT_DMISMATCH)); 888 889 while (first_bin <= levels + 1 && data[first_bin] == 0) 890 first_bin++; 891 892 if (first_bin > levels + 1) { 893 first_bin = 0; 894 last_bin = 2; 895 } else { 896 if (first_bin > 0) 897 first_bin--; 898 899 while (last_bin > 0 && data[last_bin] == 0) 900 last_bin--; 901 902 if (last_bin < levels + 1) 903 last_bin++; 904 } 905 906 for (i = first_bin; i <= last_bin; i++) { 907 positives |= (data[i] > 0); 908 negatives |= (data[i] < 0); 909 dt_quantize_total(dtp, data[i], &total); 910 } 911 912 if (dt_printf(dtp, fp, "\n%16s %41s %-9s\n", "value", 913 "------------- Distribution -------------", "count") < 0) 914 return (-1); 915 916 for (i = first_bin; i <= last_bin; i++) { 917 char c[32]; 918 int err; 919 920 if (i == 0) { 921 (void) snprintf(c, sizeof (c), "< %d", base); 922 err = dt_printf(dtp, fp, "%16s ", c); 923 } else if (i == levels + 1) { 924 (void) snprintf(c, sizeof (c), ">= %d", 925 base + (levels * step)); 926 err = dt_printf(dtp, fp, "%16s ", c); 927 } else { 928 err = dt_printf(dtp, fp, "%16d ", 929 base + (i - 1) * step); 930 } 931 932 if (err < 0 || dt_print_quantline(dtp, fp, data[i], normal, 933 total, positives, negatives) < 0) 934 return (-1); 935 } 936 937 return (0); 938 } 939 940 /*ARGSUSED*/ 941 int 942 dt_print_lquantize_packed(dtrace_hdl_t *dtp, FILE *fp, const void *addr, 943 size_t size, const dtrace_aggdata_t *aggdata) 944 { 945 const int64_t *data = addr; 946 long double total = 0, count = 0; 947 int min, max, base, err; 948 uint64_t arg; 949 uint16_t step, levels; 950 char c[32]; 951 unsigned int i; 952 953 if (size < sizeof (uint64_t)) 954 return (dt_set_errno(dtp, EDT_DMISMATCH)); 955 956 arg = *data++; 957 size -= sizeof (uint64_t); 958 959 base = DTRACE_LQUANTIZE_BASE(arg); 960 step = DTRACE_LQUANTIZE_STEP(arg); 961 levels = DTRACE_LQUANTIZE_LEVELS(arg); 962 963 if (size != sizeof (uint64_t) * (levels + 2)) 964 return (dt_set_errno(dtp, EDT_DMISMATCH)); 965 966 min = 0; 967 max = levels + 1; 968 969 if (min == 0) { 970 (void) snprintf(c, sizeof (c), "< %d", base); 971 err = dt_printf(dtp, fp, "%8s :", c); 972 } else { 973 err = dt_printf(dtp, fp, "%8d :", base + (min - 1) * step); 974 } 975 976 if (err < 0) 977 return (-1); 978 979 for (i = min; i <= max; i++) { 980 dt_quantize_total(dtp, data[i], &total); 981 count += data[i]; 982 } 983 984 for (i = min; i <= max; i++) { 985 if (dt_print_packed(dtp, fp, data[i], total) < 0) 986 return (-1); 987 } 988 989 (void) snprintf(c, sizeof (c), ">= %d", base + (levels * step)); 990 return (dt_printf(dtp, fp, ": %-8s | %lld\n", c, (long long)count)); 991 } 992 993 int 994 dt_print_llquantize(dtrace_hdl_t *dtp, FILE *fp, const void *addr, 995 size_t size, uint64_t normal) 996 { 997 int i, first_bin, last_bin, bin = 1, order, levels; 998 uint16_t factor, low, high, nsteps; 999 const int64_t *data = addr; 1000 int64_t value = 1, next, step; 1001 char positives = 0, negatives = 0; 1002 long double total = 0; 1003 uint64_t arg; 1004 char c[32]; 1005 1006 if (size < sizeof (uint64_t)) 1007 return (dt_set_errno(dtp, EDT_DMISMATCH)); 1008 1009 arg = *data++; 1010 size -= sizeof (uint64_t); 1011 1012 factor = DTRACE_LLQUANTIZE_FACTOR(arg); 1013 low = DTRACE_LLQUANTIZE_LOW(arg); 1014 high = DTRACE_LLQUANTIZE_HIGH(arg); 1015 nsteps = DTRACE_LLQUANTIZE_NSTEP(arg); 1016 1017 /* 1018 * We don't expect to be handed invalid llquantize() parameters here, 1019 * but sanity check them (to a degree) nonetheless. 1020 */ 1021 if (size > INT32_MAX || factor < 2 || low >= high || 1022 nsteps == 0 || factor > nsteps) 1023 return (dt_set_errno(dtp, EDT_DMISMATCH)); 1024 1025 levels = (int)size / sizeof (uint64_t); 1026 1027 first_bin = 0; 1028 last_bin = levels - 1; 1029 1030 while (first_bin < levels && data[first_bin] == 0) 1031 first_bin++; 1032 1033 if (first_bin == levels) { 1034 first_bin = 0; 1035 last_bin = 1; 1036 } else { 1037 if (first_bin > 0) 1038 first_bin--; 1039 1040 while (last_bin > 0 && data[last_bin] == 0) 1041 last_bin--; 1042 1043 if (last_bin < levels - 1) 1044 last_bin++; 1045 } 1046 1047 for (i = first_bin; i <= last_bin; i++) { 1048 positives |= (data[i] > 0); 1049 negatives |= (data[i] < 0); 1050 dt_quantize_total(dtp, data[i], &total); 1051 } 1052 1053 if (dt_printf(dtp, fp, "\n%16s %41s %-9s\n", "value", 1054 "------------- Distribution -------------", "count") < 0) 1055 return (-1); 1056 1057 for (order = 0; order < low; order++) 1058 value *= factor; 1059 1060 next = value * factor; 1061 step = next > nsteps ? next / nsteps : 1; 1062 1063 if (first_bin == 0) { 1064 (void) snprintf(c, sizeof (c), "< %lld", (long long)value); 1065 1066 if (dt_printf(dtp, fp, "%16s ", c) < 0) 1067 return (-1); 1068 1069 if (dt_print_quantline(dtp, fp, data[0], normal, 1070 total, positives, negatives) < 0) 1071 return (-1); 1072 } 1073 1074 while (order <= high) { 1075 if (bin >= first_bin && bin <= last_bin) { 1076 if (dt_printf(dtp, fp, "%16lld ", (long long)value) < 0) 1077 return (-1); 1078 1079 if (dt_print_quantline(dtp, fp, data[bin], 1080 normal, total, positives, negatives) < 0) 1081 return (-1); 1082 } 1083 1084 assert(value < next); 1085 bin++; 1086 1087 if ((value += step) != next) 1088 continue; 1089 1090 next = value * factor; 1091 step = next > nsteps ? next / nsteps : 1; 1092 order++; 1093 } 1094 1095 if (last_bin < bin) 1096 return (0); 1097 1098 assert(last_bin == bin); 1099 (void) snprintf(c, sizeof (c), ">= %lld", (long long)value); 1100 1101 if (dt_printf(dtp, fp, "%16s ", c) < 0) 1102 return (-1); 1103 1104 return (dt_print_quantline(dtp, fp, data[bin], normal, 1105 total, positives, negatives)); 1106 } 1107 1108 /*ARGSUSED*/ 1109 static int 1110 dt_print_average(dtrace_hdl_t *dtp, FILE *fp, caddr_t addr, 1111 size_t size, uint64_t normal) 1112 { 1113 /* LINTED - alignment */ 1114 int64_t *data = (int64_t *)addr; 1115 1116 return (dt_printf(dtp, fp, " %16lld", data[0] ? 1117 (long long)(data[1] / (int64_t)normal / data[0]) : 0)); 1118 } 1119 1120 /*ARGSUSED*/ 1121 static int 1122 dt_print_stddev(dtrace_hdl_t *dtp, FILE *fp, caddr_t addr, 1123 size_t size, uint64_t normal) 1124 { 1125 /* LINTED - alignment */ 1126 uint64_t *data = (uint64_t *)addr; 1127 1128 return (dt_printf(dtp, fp, " %16llu", data[0] ? 1129 (unsigned long long) dt_stddev(data, normal) : 0)); 1130 } 1131 1132 /*ARGSUSED*/ 1133 static int 1134 dt_print_bytes(dtrace_hdl_t *dtp, FILE *fp, caddr_t addr, 1135 size_t nbytes, int width, int quiet, int forceraw) 1136 { 1137 /* 1138 * If the byte stream is a series of printable characters, followed by 1139 * a terminating byte, we print it out as a string. Otherwise, we 1140 * assume that it's something else and just print the bytes. 1141 */ 1142 int i, j, margin = 5; 1143 char *c = (char *)addr; 1144 1145 if (nbytes == 0) 1146 return (0); 1147 1148 if (forceraw) 1149 goto raw; 1150 1151 if (dtp->dt_options[DTRACEOPT_RAWBYTES] != DTRACEOPT_UNSET) 1152 goto raw; 1153 1154 for (i = 0; i < nbytes; i++) { 1155 /* 1156 * We define a "printable character" to be one for which 1157 * isprint(3C) returns non-zero, isspace(3C) returns non-zero, 1158 * or a character which is either backspace or the bell. 1159 * Backspace and the bell are regrettably special because 1160 * they fail the first two tests -- and yet they are entirely 1161 * printable. These are the only two control characters that 1162 * have meaning for the terminal and for which isprint(3C) and 1163 * isspace(3C) return 0. 1164 */ 1165 if (isprint(c[i]) || isspace(c[i]) || 1166 c[i] == '\b' || c[i] == '\a') 1167 continue; 1168 1169 if (c[i] == '\0' && i > 0) { 1170 /* 1171 * This looks like it might be a string. Before we 1172 * assume that it is indeed a string, check the 1173 * remainder of the byte range; if it contains 1174 * additional non-nul characters, we'll assume that 1175 * it's a binary stream that just happens to look like 1176 * a string, and we'll print out the individual bytes. 1177 */ 1178 for (j = i + 1; j < nbytes; j++) { 1179 if (c[j] != '\0') 1180 break; 1181 } 1182 1183 if (j != nbytes) 1184 break; 1185 1186 if (quiet) { 1187 return (dt_printf(dtp, fp, "%s", c)); 1188 } else { 1189 return (dt_printf(dtp, fp, " %s%*s", 1190 width < 0 ? " " : "", width, c)); 1191 } 1192 } 1193 1194 break; 1195 } 1196 1197 if (i == nbytes) { 1198 /* 1199 * The byte range is all printable characters, but there is 1200 * no trailing nul byte. We'll assume that it's a string and 1201 * print it as such. 1202 */ 1203 char *s = alloca(nbytes + 1); 1204 bcopy(c, s, nbytes); 1205 s[nbytes] = '\0'; 1206 return (dt_printf(dtp, fp, " %-*s", width, s)); 1207 } 1208 1209 raw: 1210 if (dt_printf(dtp, fp, "\n%*s ", margin, "") < 0) 1211 return (-1); 1212 1213 for (i = 0; i < 16; i++) 1214 if (dt_printf(dtp, fp, " %c", "0123456789abcdef"[i]) < 0) 1215 return (-1); 1216 1217 if (dt_printf(dtp, fp, " 0123456789abcdef\n") < 0) 1218 return (-1); 1219 1220 1221 for (i = 0; i < nbytes; i += 16) { 1222 if (dt_printf(dtp, fp, "%*s%5x:", margin, "", i) < 0) 1223 return (-1); 1224 1225 for (j = i; j < i + 16 && j < nbytes; j++) { 1226 if (dt_printf(dtp, fp, " %02x", (uchar_t)c[j]) < 0) 1227 return (-1); 1228 } 1229 1230 while (j++ % 16) { 1231 if (dt_printf(dtp, fp, " ") < 0) 1232 return (-1); 1233 } 1234 1235 if (dt_printf(dtp, fp, " ") < 0) 1236 return (-1); 1237 1238 for (j = i; j < i + 16 && j < nbytes; j++) { 1239 if (dt_printf(dtp, fp, "%c", 1240 c[j] < ' ' || c[j] > '~' ? '.' : c[j]) < 0) 1241 return (-1); 1242 } 1243 1244 if (dt_printf(dtp, fp, "\n") < 0) 1245 return (-1); 1246 } 1247 1248 return (0); 1249 } 1250 1251 int 1252 dt_print_stack(dtrace_hdl_t *dtp, FILE *fp, const char *format, 1253 caddr_t addr, int depth, int size) 1254 { 1255 dtrace_syminfo_t dts; 1256 GElf_Sym sym; 1257 int i, indent; 1258 char c[PATH_MAX * 2]; 1259 uint64_t pc; 1260 1261 if (dt_printf(dtp, fp, "\n") < 0) 1262 return (-1); 1263 1264 if (format == NULL) 1265 format = "%s"; 1266 1267 if (dtp->dt_options[DTRACEOPT_STACKINDENT] != DTRACEOPT_UNSET) 1268 indent = (int)dtp->dt_options[DTRACEOPT_STACKINDENT]; 1269 else 1270 indent = _dtrace_stkindent; 1271 1272 for (i = 0; i < depth; i++) { 1273 switch (size) { 1274 case sizeof (uint32_t): 1275 /* LINTED - alignment */ 1276 pc = *((uint32_t *)addr); 1277 break; 1278 1279 case sizeof (uint64_t): 1280 /* LINTED - alignment */ 1281 pc = *((uint64_t *)addr); 1282 break; 1283 1284 default: 1285 return (dt_set_errno(dtp, EDT_BADSTACKPC)); 1286 } 1287 1288 if (pc == 0) 1289 break; 1290 1291 addr += size; 1292 1293 if (dt_printf(dtp, fp, "%*s", indent, "") < 0) 1294 return (-1); 1295 1296 if (dtrace_lookup_by_addr(dtp, pc, &sym, &dts) == 0) { 1297 if (pc > sym.st_value) { 1298 (void) snprintf(c, sizeof (c), "%s`%s+0x%llx", 1299 dts.dts_object, dts.dts_name, 1300 (u_longlong_t)(pc - sym.st_value)); 1301 } else { 1302 (void) snprintf(c, sizeof (c), "%s`%s", 1303 dts.dts_object, dts.dts_name); 1304 } 1305 } else { 1306 /* 1307 * We'll repeat the lookup, but this time we'll specify 1308 * a NULL GElf_Sym -- indicating that we're only 1309 * interested in the containing module. 1310 */ 1311 if (dtrace_lookup_by_addr(dtp, pc, NULL, &dts) == 0) { 1312 (void) snprintf(c, sizeof (c), "%s`0x%llx", 1313 dts.dts_object, (u_longlong_t)pc); 1314 } else { 1315 (void) snprintf(c, sizeof (c), "0x%llx", 1316 (u_longlong_t)pc); 1317 } 1318 } 1319 1320 if (dt_printf(dtp, fp, format, c) < 0) 1321 return (-1); 1322 1323 if (dt_printf(dtp, fp, "\n") < 0) 1324 return (-1); 1325 } 1326 1327 return (0); 1328 } 1329 1330 int 1331 dt_print_ustack(dtrace_hdl_t *dtp, FILE *fp, const char *format, 1332 caddr_t addr, uint64_t arg) 1333 { 1334 /* LINTED - alignment */ 1335 uint64_t *pc = (uint64_t *)addr; 1336 uint32_t depth = DTRACE_USTACK_NFRAMES(arg); 1337 uint32_t strsize = DTRACE_USTACK_STRSIZE(arg); 1338 const char *strbase = addr + (depth + 1) * sizeof (uint64_t); 1339 const char *str = strsize ? strbase : NULL; 1340 int err = 0; 1341 1342 char name[PATH_MAX], objname[PATH_MAX], c[PATH_MAX * 2]; 1343 struct ps_prochandle *P; 1344 GElf_Sym sym; 1345 int i, indent; 1346 pid_t pid; 1347 1348 if (depth == 0) 1349 return (0); 1350 1351 pid = (pid_t)*pc++; 1352 1353 if (dt_printf(dtp, fp, "\n") < 0) 1354 return (-1); 1355 1356 if (format == NULL) 1357 format = "%s"; 1358 1359 if (dtp->dt_options[DTRACEOPT_STACKINDENT] != DTRACEOPT_UNSET) 1360 indent = (int)dtp->dt_options[DTRACEOPT_STACKINDENT]; 1361 else 1362 indent = _dtrace_stkindent; 1363 1364 /* 1365 * Ultimately, we need to add an entry point in the library vector for 1366 * determining <symbol, offset> from <pid, address>. For now, if 1367 * this is a vector open, we just print the raw address or string. 1368 */ 1369 if (dtp->dt_vector == NULL) 1370 P = dt_proc_grab(dtp, pid, PGRAB_RDONLY | PGRAB_FORCE, 0); 1371 else 1372 P = NULL; 1373 1374 if (P != NULL) 1375 dt_proc_lock(dtp, P); /* lock handle while we perform lookups */ 1376 1377 for (i = 0; i < depth && pc[i] != 0; i++) { 1378 const prmap_t *map; 1379 1380 if ((err = dt_printf(dtp, fp, "%*s", indent, "")) < 0) 1381 break; 1382 1383 if (P != NULL && Plookup_by_addr(P, pc[i], 1384 name, sizeof (name), &sym) == 0) { 1385 (void) Pobjname(P, pc[i], objname, sizeof (objname)); 1386 1387 if (pc[i] > sym.st_value) { 1388 (void) snprintf(c, sizeof (c), 1389 "%s`%s+0x%llx", dt_basename(objname), name, 1390 (u_longlong_t)(pc[i] - sym.st_value)); 1391 } else { 1392 (void) snprintf(c, sizeof (c), 1393 "%s`%s", dt_basename(objname), name); 1394 } 1395 } else if (str != NULL && str[0] != '\0' && str[0] != '@' && 1396 (P != NULL && ((map = Paddr_to_map(P, pc[i])) == NULL || 1397 (map->pr_mflags & MA_WRITE)))) { 1398 /* 1399 * If the current string pointer in the string table 1400 * does not point to an empty string _and_ the program 1401 * counter falls in a writable region, we'll use the 1402 * string from the string table instead of the raw 1403 * address. This last condition is necessary because 1404 * some (broken) ustack helpers will return a string 1405 * even for a program counter that they can't 1406 * identify. If we have a string for a program 1407 * counter that falls in a segment that isn't 1408 * writable, we assume that we have fallen into this 1409 * case and we refuse to use the string. 1410 */ 1411 (void) snprintf(c, sizeof (c), "%s", str); 1412 } else { 1413 if (P != NULL && Pobjname(P, pc[i], objname, 1414 sizeof (objname)) != 0) { 1415 (void) snprintf(c, sizeof (c), "%s`0x%llx", 1416 dt_basename(objname), (u_longlong_t)pc[i]); 1417 } else { 1418 (void) snprintf(c, sizeof (c), "0x%llx", 1419 (u_longlong_t)pc[i]); 1420 } 1421 } 1422 1423 if ((err = dt_printf(dtp, fp, format, c)) < 0) 1424 break; 1425 1426 if ((err = dt_printf(dtp, fp, "\n")) < 0) 1427 break; 1428 1429 if (str != NULL && str[0] == '@') { 1430 /* 1431 * If the first character of the string is an "at" sign, 1432 * then the string is inferred to be an annotation -- 1433 * and it is printed out beneath the frame and offset 1434 * with brackets. 1435 */ 1436 if ((err = dt_printf(dtp, fp, "%*s", indent, "")) < 0) 1437 break; 1438 1439 (void) snprintf(c, sizeof (c), " [ %s ]", &str[1]); 1440 1441 if ((err = dt_printf(dtp, fp, format, c)) < 0) 1442 break; 1443 1444 if ((err = dt_printf(dtp, fp, "\n")) < 0) 1445 break; 1446 } 1447 1448 if (str != NULL) { 1449 str += strlen(str) + 1; 1450 if (str - strbase >= strsize) 1451 str = NULL; 1452 } 1453 } 1454 1455 if (P != NULL) { 1456 dt_proc_unlock(dtp, P); 1457 dt_proc_release(dtp, P); 1458 } 1459 1460 return (err); 1461 } 1462 1463 static int 1464 dt_print_usym(dtrace_hdl_t *dtp, FILE *fp, caddr_t addr, dtrace_actkind_t act) 1465 { 1466 /* LINTED - alignment */ 1467 uint64_t pid = ((uint64_t *)addr)[0]; 1468 /* LINTED - alignment */ 1469 uint64_t pc = ((uint64_t *)addr)[1]; 1470 const char *format = " %-50s"; 1471 char *s; 1472 int n, len = 256; 1473 1474 if (act == DTRACEACT_USYM && dtp->dt_vector == NULL) { 1475 struct ps_prochandle *P; 1476 1477 if ((P = dt_proc_grab(dtp, pid, 1478 PGRAB_RDONLY | PGRAB_FORCE, 0)) != NULL) { 1479 GElf_Sym sym; 1480 1481 dt_proc_lock(dtp, P); 1482 1483 if (Plookup_by_addr(P, pc, NULL, 0, &sym) == 0) 1484 pc = sym.st_value; 1485 1486 dt_proc_unlock(dtp, P); 1487 dt_proc_release(dtp, P); 1488 } 1489 } 1490 1491 do { 1492 n = len; 1493 s = alloca(n); 1494 } while ((len = dtrace_uaddr2str(dtp, pid, pc, s, n)) > n); 1495 1496 return (dt_printf(dtp, fp, format, s)); 1497 } 1498 1499 int 1500 dt_print_umod(dtrace_hdl_t *dtp, FILE *fp, const char *format, caddr_t addr) 1501 { 1502 /* LINTED - alignment */ 1503 uint64_t pid = ((uint64_t *)addr)[0]; 1504 /* LINTED - alignment */ 1505 uint64_t pc = ((uint64_t *)addr)[1]; 1506 int err = 0; 1507 1508 char objname[PATH_MAX], c[PATH_MAX * 2]; 1509 struct ps_prochandle *P; 1510 1511 if (format == NULL) 1512 format = " %-50s"; 1513 1514 /* 1515 * See the comment in dt_print_ustack() for the rationale for 1516 * printing raw addresses in the vectored case. 1517 */ 1518 if (dtp->dt_vector == NULL) 1519 P = dt_proc_grab(dtp, pid, PGRAB_RDONLY | PGRAB_FORCE, 0); 1520 else 1521 P = NULL; 1522 1523 if (P != NULL) 1524 dt_proc_lock(dtp, P); /* lock handle while we perform lookups */ 1525 1526 if (P != NULL && Pobjname(P, pc, objname, sizeof (objname)) != 0) { 1527 (void) snprintf(c, sizeof (c), "%s", dt_basename(objname)); 1528 } else { 1529 (void) snprintf(c, sizeof (c), "0x%llx", (u_longlong_t)pc); 1530 } 1531 1532 err = dt_printf(dtp, fp, format, c); 1533 1534 if (P != NULL) { 1535 dt_proc_unlock(dtp, P); 1536 dt_proc_release(dtp, P); 1537 } 1538 1539 return (err); 1540 } 1541 1542 static int 1543 dt_print_sym(dtrace_hdl_t *dtp, FILE *fp, const char *format, caddr_t addr) 1544 { 1545 /* LINTED - alignment */ 1546 uint64_t pc = *((uint64_t *)addr); 1547 dtrace_syminfo_t dts; 1548 GElf_Sym sym; 1549 char c[PATH_MAX * 2]; 1550 1551 if (format == NULL) 1552 format = " %-50s"; 1553 1554 if (dtrace_lookup_by_addr(dtp, pc, &sym, &dts) == 0) { 1555 (void) snprintf(c, sizeof (c), "%s`%s", 1556 dts.dts_object, dts.dts_name); 1557 } else { 1558 /* 1559 * We'll repeat the lookup, but this time we'll specify a 1560 * NULL GElf_Sym -- indicating that we're only interested in 1561 * the containing module. 1562 */ 1563 if (dtrace_lookup_by_addr(dtp, pc, NULL, &dts) == 0) { 1564 (void) snprintf(c, sizeof (c), "%s`0x%llx", 1565 dts.dts_object, (u_longlong_t)pc); 1566 } else { 1567 (void) snprintf(c, sizeof (c), "0x%llx", 1568 (u_longlong_t)pc); 1569 } 1570 } 1571 1572 if (dt_printf(dtp, fp, format, c) < 0) 1573 return (-1); 1574 1575 return (0); 1576 } 1577 1578 int 1579 dt_print_mod(dtrace_hdl_t *dtp, FILE *fp, const char *format, caddr_t addr) 1580 { 1581 /* LINTED - alignment */ 1582 uint64_t pc = *((uint64_t *)addr); 1583 dtrace_syminfo_t dts; 1584 char c[PATH_MAX * 2]; 1585 1586 if (format == NULL) 1587 format = " %-50s"; 1588 1589 if (dtrace_lookup_by_addr(dtp, pc, NULL, &dts) == 0) { 1590 (void) snprintf(c, sizeof (c), "%s", dts.dts_object); 1591 } else { 1592 (void) snprintf(c, sizeof (c), "0x%llx", (u_longlong_t)pc); 1593 } 1594 1595 if (dt_printf(dtp, fp, format, c) < 0) 1596 return (-1); 1597 1598 return (0); 1599 } 1600 1601 static int 1602 dt_print_memory(dtrace_hdl_t *dtp, FILE *fp, caddr_t addr) 1603 { 1604 int quiet = (dtp->dt_options[DTRACEOPT_QUIET] != DTRACEOPT_UNSET); 1605 size_t nbytes = *((uintptr_t *) addr); 1606 1607 return (dt_print_bytes(dtp, fp, addr + sizeof(uintptr_t), 1608 nbytes, 50, quiet, 1)); 1609 } 1610 1611 typedef struct dt_normal { 1612 dtrace_aggvarid_t dtnd_id; 1613 uint64_t dtnd_normal; 1614 } dt_normal_t; 1615 1616 static int 1617 dt_normalize_agg(const dtrace_aggdata_t *aggdata, void *arg) 1618 { 1619 dt_normal_t *normal = arg; 1620 dtrace_aggdesc_t *agg = aggdata->dtada_desc; 1621 dtrace_aggvarid_t id = normal->dtnd_id; 1622 1623 if (agg->dtagd_nrecs == 0) 1624 return (DTRACE_AGGWALK_NEXT); 1625 1626 if (agg->dtagd_varid != id) 1627 return (DTRACE_AGGWALK_NEXT); 1628 1629 ((dtrace_aggdata_t *)aggdata)->dtada_normal = normal->dtnd_normal; 1630 return (DTRACE_AGGWALK_NORMALIZE); 1631 } 1632 1633 static int 1634 dt_normalize(dtrace_hdl_t *dtp, caddr_t base, dtrace_recdesc_t *rec) 1635 { 1636 dt_normal_t normal; 1637 caddr_t addr; 1638 1639 /* 1640 * We (should) have two records: the aggregation ID followed by the 1641 * normalization value. 1642 */ 1643 addr = base + rec->dtrd_offset; 1644 1645 if (rec->dtrd_size != sizeof (dtrace_aggvarid_t)) 1646 return (dt_set_errno(dtp, EDT_BADNORMAL)); 1647 1648 /* LINTED - alignment */ 1649 normal.dtnd_id = *((dtrace_aggvarid_t *)addr); 1650 rec++; 1651 1652 if (rec->dtrd_action != DTRACEACT_LIBACT) 1653 return (dt_set_errno(dtp, EDT_BADNORMAL)); 1654 1655 if (rec->dtrd_arg != DT_ACT_NORMALIZE) 1656 return (dt_set_errno(dtp, EDT_BADNORMAL)); 1657 1658 addr = base + rec->dtrd_offset; 1659 1660 switch (rec->dtrd_size) { 1661 case sizeof (uint64_t): 1662 /* LINTED - alignment */ 1663 normal.dtnd_normal = *((uint64_t *)addr); 1664 break; 1665 case sizeof (uint32_t): 1666 /* LINTED - alignment */ 1667 normal.dtnd_normal = *((uint32_t *)addr); 1668 break; 1669 case sizeof (uint16_t): 1670 /* LINTED - alignment */ 1671 normal.dtnd_normal = *((uint16_t *)addr); 1672 break; 1673 case sizeof (uint8_t): 1674 normal.dtnd_normal = *((uint8_t *)addr); 1675 break; 1676 default: 1677 return (dt_set_errno(dtp, EDT_BADNORMAL)); 1678 } 1679 1680 (void) dtrace_aggregate_walk(dtp, dt_normalize_agg, &normal); 1681 1682 return (0); 1683 } 1684 1685 static int 1686 dt_denormalize_agg(const dtrace_aggdata_t *aggdata, void *arg) 1687 { 1688 dtrace_aggdesc_t *agg = aggdata->dtada_desc; 1689 dtrace_aggvarid_t id = *((dtrace_aggvarid_t *)arg); 1690 1691 if (agg->dtagd_nrecs == 0) 1692 return (DTRACE_AGGWALK_NEXT); 1693 1694 if (agg->dtagd_varid != id) 1695 return (DTRACE_AGGWALK_NEXT); 1696 1697 return (DTRACE_AGGWALK_DENORMALIZE); 1698 } 1699 1700 static int 1701 dt_clear_agg(const dtrace_aggdata_t *aggdata, void *arg) 1702 { 1703 dtrace_aggdesc_t *agg = aggdata->dtada_desc; 1704 dtrace_aggvarid_t id = *((dtrace_aggvarid_t *)arg); 1705 1706 if (agg->dtagd_nrecs == 0) 1707 return (DTRACE_AGGWALK_NEXT); 1708 1709 if (agg->dtagd_varid != id) 1710 return (DTRACE_AGGWALK_NEXT); 1711 1712 return (DTRACE_AGGWALK_CLEAR); 1713 } 1714 1715 typedef struct dt_trunc { 1716 dtrace_aggvarid_t dttd_id; 1717 uint64_t dttd_remaining; 1718 } dt_trunc_t; 1719 1720 static int 1721 dt_trunc_agg(const dtrace_aggdata_t *aggdata, void *arg) 1722 { 1723 dt_trunc_t *trunc = arg; 1724 dtrace_aggdesc_t *agg = aggdata->dtada_desc; 1725 dtrace_aggvarid_t id = trunc->dttd_id; 1726 1727 if (agg->dtagd_nrecs == 0) 1728 return (DTRACE_AGGWALK_NEXT); 1729 1730 if (agg->dtagd_varid != id) 1731 return (DTRACE_AGGWALK_NEXT); 1732 1733 if (trunc->dttd_remaining == 0) 1734 return (DTRACE_AGGWALK_REMOVE); 1735 1736 trunc->dttd_remaining--; 1737 return (DTRACE_AGGWALK_NEXT); 1738 } 1739 1740 static int 1741 dt_trunc(dtrace_hdl_t *dtp, caddr_t base, dtrace_recdesc_t *rec) 1742 { 1743 dt_trunc_t trunc; 1744 caddr_t addr; 1745 int64_t remaining; 1746 int (*func)(dtrace_hdl_t *, dtrace_aggregate_f *, void *); 1747 1748 /* 1749 * We (should) have two records: the aggregation ID followed by the 1750 * number of aggregation entries after which the aggregation is to be 1751 * truncated. 1752 */ 1753 addr = base + rec->dtrd_offset; 1754 1755 if (rec->dtrd_size != sizeof (dtrace_aggvarid_t)) 1756 return (dt_set_errno(dtp, EDT_BADTRUNC)); 1757 1758 /* LINTED - alignment */ 1759 trunc.dttd_id = *((dtrace_aggvarid_t *)addr); 1760 rec++; 1761 1762 if (rec->dtrd_action != DTRACEACT_LIBACT) 1763 return (dt_set_errno(dtp, EDT_BADTRUNC)); 1764 1765 if (rec->dtrd_arg != DT_ACT_TRUNC) 1766 return (dt_set_errno(dtp, EDT_BADTRUNC)); 1767 1768 addr = base + rec->dtrd_offset; 1769 1770 switch (rec->dtrd_size) { 1771 case sizeof (uint64_t): 1772 /* LINTED - alignment */ 1773 remaining = *((int64_t *)addr); 1774 break; 1775 case sizeof (uint32_t): 1776 /* LINTED - alignment */ 1777 remaining = *((int32_t *)addr); 1778 break; 1779 case sizeof (uint16_t): 1780 /* LINTED - alignment */ 1781 remaining = *((int16_t *)addr); 1782 break; 1783 case sizeof (uint8_t): 1784 remaining = *((int8_t *)addr); 1785 break; 1786 default: 1787 return (dt_set_errno(dtp, EDT_BADNORMAL)); 1788 } 1789 1790 if (remaining < 0) { 1791 func = dtrace_aggregate_walk_valsorted; 1792 remaining = -remaining; 1793 } else { 1794 func = dtrace_aggregate_walk_valrevsorted; 1795 } 1796 1797 assert(remaining >= 0); 1798 trunc.dttd_remaining = remaining; 1799 1800 (void) func(dtp, dt_trunc_agg, &trunc); 1801 1802 return (0); 1803 } 1804 1805 static int 1806 dt_print_datum(dtrace_hdl_t *dtp, FILE *fp, dtrace_recdesc_t *rec, 1807 caddr_t addr, size_t size, const dtrace_aggdata_t *aggdata, 1808 uint64_t normal, dt_print_aggdata_t *pd) 1809 { 1810 int err, width; 1811 dtrace_actkind_t act = rec->dtrd_action; 1812 boolean_t packed = pd->dtpa_agghist || pd->dtpa_aggpack; 1813 dtrace_aggdesc_t *agg = aggdata->dtada_desc; 1814 1815 static struct { 1816 size_t size; 1817 int width; 1818 int packedwidth; 1819 } *fmt, fmttab[] = { 1820 { sizeof (uint8_t), 3, 3 }, 1821 { sizeof (uint16_t), 5, 5 }, 1822 { sizeof (uint32_t), 8, 8 }, 1823 { sizeof (uint64_t), 16, 16 }, 1824 { 0, -50, 16 } 1825 }; 1826 1827 if (packed && pd->dtpa_agghisthdr != agg->dtagd_varid) { 1828 dtrace_recdesc_t *r; 1829 1830 width = 0; 1831 1832 /* 1833 * To print our quantization header for either an agghist or 1834 * aggpack aggregation, we need to iterate through all of our 1835 * of our records to determine their width. 1836 */ 1837 for (r = rec; !DTRACEACT_ISAGG(r->dtrd_action); r++) { 1838 for (fmt = fmttab; fmt->size && 1839 fmt->size != r->dtrd_size; fmt++) 1840 continue; 1841 1842 width += fmt->packedwidth + 1; 1843 } 1844 1845 if (pd->dtpa_agghist) { 1846 if (dt_print_quanthdr(dtp, fp, width) < 0) 1847 return (-1); 1848 } else { 1849 if (dt_print_quanthdr_packed(dtp, fp, 1850 width, aggdata, r->dtrd_action) < 0) 1851 return (-1); 1852 } 1853 1854 pd->dtpa_agghisthdr = agg->dtagd_varid; 1855 } 1856 1857 if (pd->dtpa_agghist && DTRACEACT_ISAGG(act)) { 1858 char positives = aggdata->dtada_flags & DTRACE_A_HASPOSITIVES; 1859 char negatives = aggdata->dtada_flags & DTRACE_A_HASNEGATIVES; 1860 int64_t val; 1861 1862 assert(act == DTRACEAGG_SUM || act == DTRACEAGG_COUNT); 1863 val = (long long)*((uint64_t *)addr); 1864 1865 if (dt_printf(dtp, fp, " ") < 0) 1866 return (-1); 1867 1868 return (dt_print_quantline(dtp, fp, val, normal, 1869 aggdata->dtada_total, positives, negatives)); 1870 } 1871 1872 if (pd->dtpa_aggpack && DTRACEACT_ISAGG(act)) { 1873 switch (act) { 1874 case DTRACEAGG_QUANTIZE: 1875 return (dt_print_quantize_packed(dtp, 1876 fp, addr, size, aggdata)); 1877 case DTRACEAGG_LQUANTIZE: 1878 return (dt_print_lquantize_packed(dtp, 1879 fp, addr, size, aggdata)); 1880 default: 1881 break; 1882 } 1883 } 1884 1885 switch (act) { 1886 case DTRACEACT_STACK: 1887 return (dt_print_stack(dtp, fp, NULL, addr, 1888 rec->dtrd_arg, rec->dtrd_size / rec->dtrd_arg)); 1889 1890 case DTRACEACT_USTACK: 1891 case DTRACEACT_JSTACK: 1892 return (dt_print_ustack(dtp, fp, NULL, addr, rec->dtrd_arg)); 1893 1894 case DTRACEACT_USYM: 1895 case DTRACEACT_UADDR: 1896 return (dt_print_usym(dtp, fp, addr, act)); 1897 1898 case DTRACEACT_UMOD: 1899 return (dt_print_umod(dtp, fp, NULL, addr)); 1900 1901 case DTRACEACT_SYM: 1902 return (dt_print_sym(dtp, fp, NULL, addr)); 1903 1904 case DTRACEACT_MOD: 1905 return (dt_print_mod(dtp, fp, NULL, addr)); 1906 1907 case DTRACEAGG_QUANTIZE: 1908 return (dt_print_quantize(dtp, fp, addr, size, normal)); 1909 1910 case DTRACEAGG_LQUANTIZE: 1911 return (dt_print_lquantize(dtp, fp, addr, size, normal)); 1912 1913 case DTRACEAGG_LLQUANTIZE: 1914 return (dt_print_llquantize(dtp, fp, addr, size, normal)); 1915 1916 case DTRACEAGG_AVG: 1917 return (dt_print_average(dtp, fp, addr, size, normal)); 1918 1919 case DTRACEAGG_STDDEV: 1920 return (dt_print_stddev(dtp, fp, addr, size, normal)); 1921 1922 default: 1923 break; 1924 } 1925 1926 for (fmt = fmttab; fmt->size && fmt->size != size; fmt++) 1927 continue; 1928 1929 width = packed ? fmt->packedwidth : fmt->width; 1930 1931 switch (size) { 1932 case sizeof (uint64_t): 1933 err = dt_printf(dtp, fp, " %*lld", width, 1934 /* LINTED - alignment */ 1935 (long long)*((uint64_t *)addr) / normal); 1936 break; 1937 case sizeof (uint32_t): 1938 /* LINTED - alignment */ 1939 err = dt_printf(dtp, fp, " %*d", width, *((uint32_t *)addr) / 1940 (uint32_t)normal); 1941 break; 1942 case sizeof (uint16_t): 1943 /* LINTED - alignment */ 1944 err = dt_printf(dtp, fp, " %*d", width, *((uint16_t *)addr) / 1945 (uint32_t)normal); 1946 break; 1947 case sizeof (uint8_t): 1948 err = dt_printf(dtp, fp, " %*d", width, *((uint8_t *)addr) / 1949 (uint32_t)normal); 1950 break; 1951 default: 1952 err = dt_print_bytes(dtp, fp, addr, size, width, 0, 0); 1953 break; 1954 } 1955 1956 return (err); 1957 } 1958 1959 int 1960 dt_print_aggs(const dtrace_aggdata_t **aggsdata, int naggvars, void *arg) 1961 { 1962 int i, aggact = 0; 1963 dt_print_aggdata_t *pd = arg; 1964 const dtrace_aggdata_t *aggdata = aggsdata[0]; 1965 dtrace_aggdesc_t *agg = aggdata->dtada_desc; 1966 FILE *fp = pd->dtpa_fp; 1967 dtrace_hdl_t *dtp = pd->dtpa_dtp; 1968 dtrace_recdesc_t *rec; 1969 dtrace_actkind_t act; 1970 caddr_t addr; 1971 size_t size; 1972 1973 pd->dtpa_agghist = (aggdata->dtada_flags & DTRACE_A_TOTAL); 1974 pd->dtpa_aggpack = (aggdata->dtada_flags & DTRACE_A_MINMAXBIN); 1975 1976 /* 1977 * Iterate over each record description in the key, printing the traced 1978 * data, skipping the first datum (the tuple member created by the 1979 * compiler). 1980 */ 1981 for (i = 1; i < agg->dtagd_nrecs; i++) { 1982 rec = &agg->dtagd_rec[i]; 1983 act = rec->dtrd_action; 1984 addr = aggdata->dtada_data + rec->dtrd_offset; 1985 size = rec->dtrd_size; 1986 1987 if (DTRACEACT_ISAGG(act)) { 1988 aggact = i; 1989 break; 1990 } 1991 1992 if (dt_print_datum(dtp, fp, rec, addr, 1993 size, aggdata, 1, pd) < 0) 1994 return (-1); 1995 1996 if (dt_buffered_flush(dtp, NULL, rec, aggdata, 1997 DTRACE_BUFDATA_AGGKEY) < 0) 1998 return (-1); 1999 } 2000 2001 assert(aggact != 0); 2002 2003 for (i = (naggvars == 1 ? 0 : 1); i < naggvars; i++) { 2004 uint64_t normal; 2005 2006 aggdata = aggsdata[i]; 2007 agg = aggdata->dtada_desc; 2008 rec = &agg->dtagd_rec[aggact]; 2009 act = rec->dtrd_action; 2010 addr = aggdata->dtada_data + rec->dtrd_offset; 2011 size = rec->dtrd_size; 2012 2013 assert(DTRACEACT_ISAGG(act)); 2014 normal = aggdata->dtada_normal; 2015 2016 if (dt_print_datum(dtp, fp, rec, addr, 2017 size, aggdata, normal, pd) < 0) 2018 return (-1); 2019 2020 if (dt_buffered_flush(dtp, NULL, rec, aggdata, 2021 DTRACE_BUFDATA_AGGVAL) < 0) 2022 return (-1); 2023 2024 if (!pd->dtpa_allunprint) 2025 agg->dtagd_flags |= DTRACE_AGD_PRINTED; 2026 } 2027 2028 if (!pd->dtpa_agghist && !pd->dtpa_aggpack) { 2029 if (dt_printf(dtp, fp, "\n") < 0) 2030 return (-1); 2031 } 2032 2033 if (dt_buffered_flush(dtp, NULL, NULL, aggdata, 2034 DTRACE_BUFDATA_AGGFORMAT | DTRACE_BUFDATA_AGGLAST) < 0) 2035 return (-1); 2036 2037 return (0); 2038 } 2039 2040 int 2041 dt_print_agg(const dtrace_aggdata_t *aggdata, void *arg) 2042 { 2043 dt_print_aggdata_t *pd = arg; 2044 dtrace_aggdesc_t *agg = aggdata->dtada_desc; 2045 dtrace_aggvarid_t aggvarid = pd->dtpa_id; 2046 2047 if (pd->dtpa_allunprint) { 2048 if (agg->dtagd_flags & DTRACE_AGD_PRINTED) 2049 return (0); 2050 } else { 2051 /* 2052 * If we're not printing all unprinted aggregations, then the 2053 * aggregation variable ID denotes a specific aggregation 2054 * variable that we should print -- skip any other aggregations 2055 * that we encounter. 2056 */ 2057 if (agg->dtagd_nrecs == 0) 2058 return (0); 2059 2060 if (aggvarid != agg->dtagd_varid) 2061 return (0); 2062 } 2063 2064 return (dt_print_aggs(&aggdata, 1, arg)); 2065 } 2066 2067 int 2068 dt_setopt(dtrace_hdl_t *dtp, const dtrace_probedata_t *data, 2069 const char *option, const char *value) 2070 { 2071 int len, rval; 2072 char *msg; 2073 const char *errstr; 2074 dtrace_setoptdata_t optdata; 2075 2076 bzero(&optdata, sizeof (optdata)); 2077 (void) dtrace_getopt(dtp, option, &optdata.dtsda_oldval); 2078 2079 if (dtrace_setopt(dtp, option, value) == 0) { 2080 (void) dtrace_getopt(dtp, option, &optdata.dtsda_newval); 2081 optdata.dtsda_probe = data; 2082 optdata.dtsda_option = option; 2083 optdata.dtsda_handle = dtp; 2084 2085 if ((rval = dt_handle_setopt(dtp, &optdata)) != 0) 2086 return (rval); 2087 2088 return (0); 2089 } 2090 2091 errstr = dtrace_errmsg(dtp, dtrace_errno(dtp)); 2092 len = strlen(option) + strlen(value) + strlen(errstr) + 80; 2093 msg = alloca(len); 2094 2095 (void) snprintf(msg, len, "couldn't set option \"%s\" to \"%s\": %s\n", 2096 option, value, errstr); 2097 2098 if ((rval = dt_handle_liberr(dtp, data, msg)) == 0) 2099 return (0); 2100 2101 return (rval); 2102 } 2103 2104 static int 2105 dt_consume_cpu(dtrace_hdl_t *dtp, FILE *fp, int cpu, 2106 dtrace_bufdesc_t *buf, boolean_t just_one, 2107 dtrace_consume_probe_f *efunc, dtrace_consume_rec_f *rfunc, void *arg) 2108 { 2109 dtrace_epid_t id; 2110 size_t offs; 2111 int flow = (dtp->dt_options[DTRACEOPT_FLOWINDENT] != DTRACEOPT_UNSET); 2112 int quiet = (dtp->dt_options[DTRACEOPT_QUIET] != DTRACEOPT_UNSET); 2113 int rval, i, n; 2114 uint64_t tracememsize = 0; 2115 dtrace_probedata_t data; 2116 uint64_t drops; 2117 2118 bzero(&data, sizeof (data)); 2119 data.dtpda_handle = dtp; 2120 data.dtpda_cpu = cpu; 2121 data.dtpda_flow = dtp->dt_flow; 2122 data.dtpda_indent = dtp->dt_indent; 2123 data.dtpda_prefix = dtp->dt_prefix; 2124 2125 for (offs = buf->dtbd_oldest; offs < buf->dtbd_size; ) { 2126 dtrace_eprobedesc_t *epd; 2127 2128 /* 2129 * We're guaranteed to have an ID. 2130 */ 2131 id = *(uint32_t *)((uintptr_t)buf->dtbd_data + offs); 2132 2133 if (id == DTRACE_EPIDNONE) { 2134 /* 2135 * This is filler to assure proper alignment of the 2136 * next record; we simply ignore it. 2137 */ 2138 offs += sizeof (id); 2139 continue; 2140 } 2141 2142 if ((rval = dt_epid_lookup(dtp, id, &data.dtpda_edesc, 2143 &data.dtpda_pdesc)) != 0) 2144 return (rval); 2145 2146 epd = data.dtpda_edesc; 2147 data.dtpda_data = buf->dtbd_data + offs; 2148 2149 if (data.dtpda_edesc->dtepd_uarg != DT_ECB_DEFAULT) { 2150 rval = dt_handle(dtp, &data); 2151 2152 if (rval == DTRACE_CONSUME_NEXT) 2153 goto nextepid; 2154 2155 if (rval == DTRACE_CONSUME_ERROR) 2156 return (-1); 2157 } 2158 2159 if (flow) 2160 (void) dt_flowindent(dtp, &data, dtp->dt_last_epid, 2161 buf, offs); 2162 2163 rval = (*efunc)(&data, arg); 2164 2165 if (flow) { 2166 if (data.dtpda_flow == DTRACEFLOW_ENTRY) 2167 data.dtpda_indent += 2; 2168 } 2169 2170 if (rval == DTRACE_CONSUME_NEXT) 2171 goto nextepid; 2172 2173 if (rval == DTRACE_CONSUME_ABORT) 2174 return (dt_set_errno(dtp, EDT_DIRABORT)); 2175 2176 if (rval != DTRACE_CONSUME_THIS) 2177 return (dt_set_errno(dtp, EDT_BADRVAL)); 2178 2179 for (i = 0; i < epd->dtepd_nrecs; i++) { 2180 caddr_t addr; 2181 dtrace_recdesc_t *rec = &epd->dtepd_rec[i]; 2182 dtrace_actkind_t act = rec->dtrd_action; 2183 2184 data.dtpda_data = buf->dtbd_data + offs + 2185 rec->dtrd_offset; 2186 addr = data.dtpda_data; 2187 2188 if (act == DTRACEACT_LIBACT) { 2189 uint64_t arg = rec->dtrd_arg; 2190 dtrace_aggvarid_t id; 2191 2192 switch (arg) { 2193 case DT_ACT_CLEAR: 2194 /* LINTED - alignment */ 2195 id = *((dtrace_aggvarid_t *)addr); 2196 (void) dtrace_aggregate_walk(dtp, 2197 dt_clear_agg, &id); 2198 continue; 2199 2200 case DT_ACT_DENORMALIZE: 2201 /* LINTED - alignment */ 2202 id = *((dtrace_aggvarid_t *)addr); 2203 (void) dtrace_aggregate_walk(dtp, 2204 dt_denormalize_agg, &id); 2205 continue; 2206 2207 case DT_ACT_FTRUNCATE: 2208 if (fp == NULL) 2209 continue; 2210 2211 (void) fflush(fp); 2212 (void) ftruncate(fileno(fp), 0); 2213 (void) fseeko(fp, 0, SEEK_SET); 2214 continue; 2215 2216 case DT_ACT_NORMALIZE: 2217 if (i == epd->dtepd_nrecs - 1) 2218 return (dt_set_errno(dtp, 2219 EDT_BADNORMAL)); 2220 2221 if (dt_normalize(dtp, 2222 buf->dtbd_data + offs, rec) != 0) 2223 return (-1); 2224 2225 i++; 2226 continue; 2227 2228 case DT_ACT_SETOPT: { 2229 uint64_t *opts = dtp->dt_options; 2230 dtrace_recdesc_t *valrec; 2231 uint32_t valsize; 2232 caddr_t val; 2233 int rv; 2234 2235 if (i == epd->dtepd_nrecs - 1) { 2236 return (dt_set_errno(dtp, 2237 EDT_BADSETOPT)); 2238 } 2239 2240 valrec = &epd->dtepd_rec[++i]; 2241 valsize = valrec->dtrd_size; 2242 2243 if (valrec->dtrd_action != act || 2244 valrec->dtrd_arg != arg) { 2245 return (dt_set_errno(dtp, 2246 EDT_BADSETOPT)); 2247 } 2248 2249 if (valsize > sizeof (uint64_t)) { 2250 val = buf->dtbd_data + offs + 2251 valrec->dtrd_offset; 2252 } else { 2253 val = "1"; 2254 } 2255 2256 rv = dt_setopt(dtp, &data, addr, val); 2257 2258 if (rv != 0) 2259 return (-1); 2260 2261 flow = (opts[DTRACEOPT_FLOWINDENT] != 2262 DTRACEOPT_UNSET); 2263 quiet = (opts[DTRACEOPT_QUIET] != 2264 DTRACEOPT_UNSET); 2265 2266 continue; 2267 } 2268 2269 case DT_ACT_TRUNC: 2270 if (i == epd->dtepd_nrecs - 1) 2271 return (dt_set_errno(dtp, 2272 EDT_BADTRUNC)); 2273 2274 if (dt_trunc(dtp, 2275 buf->dtbd_data + offs, rec) != 0) 2276 return (-1); 2277 2278 i++; 2279 continue; 2280 2281 default: 2282 continue; 2283 } 2284 } 2285 2286 if (act == DTRACEACT_TRACEMEM_DYNSIZE && 2287 rec->dtrd_size == sizeof (uint64_t)) { 2288 /* LINTED - alignment */ 2289 tracememsize = *((unsigned long long *)addr); 2290 continue; 2291 } 2292 2293 rval = (*rfunc)(&data, rec, arg); 2294 2295 if (rval == DTRACE_CONSUME_NEXT) 2296 continue; 2297 2298 if (rval == DTRACE_CONSUME_ABORT) 2299 return (dt_set_errno(dtp, EDT_DIRABORT)); 2300 2301 if (rval != DTRACE_CONSUME_THIS) 2302 return (dt_set_errno(dtp, EDT_BADRVAL)); 2303 2304 if (act == DTRACEACT_STACK) { 2305 int depth = rec->dtrd_arg; 2306 2307 if (dt_print_stack(dtp, fp, NULL, addr, depth, 2308 rec->dtrd_size / depth) < 0) 2309 return (-1); 2310 goto nextrec; 2311 } 2312 2313 if (act == DTRACEACT_USTACK || 2314 act == DTRACEACT_JSTACK) { 2315 if (dt_print_ustack(dtp, fp, NULL, 2316 addr, rec->dtrd_arg) < 0) 2317 return (-1); 2318 goto nextrec; 2319 } 2320 2321 if (act == DTRACEACT_SYM) { 2322 if (dt_print_sym(dtp, fp, NULL, addr) < 0) 2323 return (-1); 2324 goto nextrec; 2325 } 2326 2327 if (act == DTRACEACT_MOD) { 2328 if (dt_print_mod(dtp, fp, NULL, addr) < 0) 2329 return (-1); 2330 goto nextrec; 2331 } 2332 2333 if (act == DTRACEACT_USYM || act == DTRACEACT_UADDR) { 2334 if (dt_print_usym(dtp, fp, addr, act) < 0) 2335 return (-1); 2336 goto nextrec; 2337 } 2338 2339 if (act == DTRACEACT_UMOD) { 2340 if (dt_print_umod(dtp, fp, NULL, addr) < 0) 2341 return (-1); 2342 goto nextrec; 2343 } 2344 2345 if (act == DTRACEACT_PRINTM) { 2346 if (dt_print_memory(dtp, fp, addr) < 0) 2347 return (-1); 2348 goto nextrec; 2349 } 2350 2351 if (DTRACEACT_ISPRINTFLIKE(act)) { 2352 void *fmtdata; 2353 int (*func)(dtrace_hdl_t *, FILE *, void *, 2354 const dtrace_probedata_t *, 2355 const dtrace_recdesc_t *, uint_t, 2356 const void *buf, size_t); 2357 2358 if ((fmtdata = dt_format_lookup(dtp, 2359 rec->dtrd_format)) == NULL) 2360 goto nofmt; 2361 2362 switch (act) { 2363 case DTRACEACT_PRINTF: 2364 func = dtrace_fprintf; 2365 break; 2366 case DTRACEACT_PRINTA: 2367 func = dtrace_fprinta; 2368 break; 2369 case DTRACEACT_SYSTEM: 2370 func = dtrace_system; 2371 break; 2372 case DTRACEACT_FREOPEN: 2373 func = dtrace_freopen; 2374 break; 2375 } 2376 2377 n = (*func)(dtp, fp, fmtdata, &data, 2378 rec, epd->dtepd_nrecs - i, 2379 (uchar_t *)buf->dtbd_data + offs, 2380 buf->dtbd_size - offs); 2381 2382 if (n < 0) 2383 return (-1); /* errno is set for us */ 2384 2385 if (n > 0) 2386 i += n - 1; 2387 goto nextrec; 2388 } 2389 2390 /* 2391 * If this is a DIF expression, and the record has a 2392 * format set, this indicates we have a CTF type name 2393 * associated with the data and we should try to print 2394 * it out by type. 2395 */ 2396 if (act == DTRACEACT_DIFEXPR) { 2397 const char *strdata = dt_strdata_lookup(dtp, 2398 rec->dtrd_format); 2399 if (strdata != NULL) { 2400 n = dtrace_print(dtp, fp, strdata, 2401 addr, rec->dtrd_size); 2402 2403 /* 2404 * dtrace_print() will return -1 on 2405 * error, or return the number of bytes 2406 * consumed. It will return 0 if the 2407 * type couldn't be determined, and we 2408 * should fall through to the normal 2409 * trace method. 2410 */ 2411 if (n < 0) 2412 return (-1); 2413 2414 if (n > 0) 2415 goto nextrec; 2416 } 2417 } 2418 2419 nofmt: 2420 if (act == DTRACEACT_PRINTA) { 2421 dt_print_aggdata_t pd; 2422 dtrace_aggvarid_t *aggvars; 2423 int j, naggvars = 0; 2424 size_t size = ((epd->dtepd_nrecs - i) * 2425 sizeof (dtrace_aggvarid_t)); 2426 2427 if ((aggvars = dt_alloc(dtp, size)) == NULL) 2428 return (-1); 2429 2430 /* 2431 * This might be a printa() with multiple 2432 * aggregation variables. We need to scan 2433 * forward through the records until we find 2434 * a record from a different statement. 2435 */ 2436 for (j = i; j < epd->dtepd_nrecs; j++) { 2437 dtrace_recdesc_t *nrec; 2438 caddr_t naddr; 2439 2440 nrec = &epd->dtepd_rec[j]; 2441 2442 if (nrec->dtrd_uarg != rec->dtrd_uarg) 2443 break; 2444 2445 if (nrec->dtrd_action != act) { 2446 return (dt_set_errno(dtp, 2447 EDT_BADAGG)); 2448 } 2449 2450 naddr = buf->dtbd_data + offs + 2451 nrec->dtrd_offset; 2452 2453 aggvars[naggvars++] = 2454 /* LINTED - alignment */ 2455 *((dtrace_aggvarid_t *)naddr); 2456 } 2457 2458 i = j - 1; 2459 bzero(&pd, sizeof (pd)); 2460 pd.dtpa_dtp = dtp; 2461 pd.dtpa_fp = fp; 2462 2463 assert(naggvars >= 1); 2464 2465 if (naggvars == 1) { 2466 pd.dtpa_id = aggvars[0]; 2467 dt_free(dtp, aggvars); 2468 2469 if (dt_printf(dtp, fp, "\n") < 0 || 2470 dtrace_aggregate_walk_sorted(dtp, 2471 dt_print_agg, &pd) < 0) 2472 return (-1); 2473 goto nextrec; 2474 } 2475 2476 if (dt_printf(dtp, fp, "\n") < 0 || 2477 dtrace_aggregate_walk_joined(dtp, aggvars, 2478 naggvars, dt_print_aggs, &pd) < 0) { 2479 dt_free(dtp, aggvars); 2480 return (-1); 2481 } 2482 2483 dt_free(dtp, aggvars); 2484 goto nextrec; 2485 } 2486 2487 if (act == DTRACEACT_TRACEMEM) { 2488 if (tracememsize == 0 || 2489 tracememsize > rec->dtrd_size) { 2490 tracememsize = rec->dtrd_size; 2491 } 2492 2493 n = dt_print_bytes(dtp, fp, addr, 2494 tracememsize, -33, quiet, 1); 2495 2496 tracememsize = 0; 2497 2498 if (n < 0) 2499 return (-1); 2500 2501 goto nextrec; 2502 } 2503 2504 switch (rec->dtrd_size) { 2505 case sizeof (uint64_t): 2506 n = dt_printf(dtp, fp, 2507 quiet ? "%lld" : " %16lld", 2508 /* LINTED - alignment */ 2509 *((unsigned long long *)addr)); 2510 break; 2511 case sizeof (uint32_t): 2512 n = dt_printf(dtp, fp, quiet ? "%d" : " %8d", 2513 /* LINTED - alignment */ 2514 *((uint32_t *)addr)); 2515 break; 2516 case sizeof (uint16_t): 2517 n = dt_printf(dtp, fp, quiet ? "%d" : " %5d", 2518 /* LINTED - alignment */ 2519 *((uint16_t *)addr)); 2520 break; 2521 case sizeof (uint8_t): 2522 n = dt_printf(dtp, fp, quiet ? "%d" : " %3d", 2523 *((uint8_t *)addr)); 2524 break; 2525 default: 2526 n = dt_print_bytes(dtp, fp, addr, 2527 rec->dtrd_size, -33, quiet, 0); 2528 break; 2529 } 2530 2531 if (n < 0) 2532 return (-1); /* errno is set for us */ 2533 2534 nextrec: 2535 if (dt_buffered_flush(dtp, &data, rec, NULL, 0) < 0) 2536 return (-1); /* errno is set for us */ 2537 } 2538 2539 /* 2540 * Call the record callback with a NULL record to indicate 2541 * that we're done processing this EPID. 2542 */ 2543 rval = (*rfunc)(&data, NULL, arg); 2544 nextepid: 2545 offs += epd->dtepd_size; 2546 dtp->dt_last_epid = id; 2547 if (just_one) { 2548 buf->dtbd_oldest = offs; 2549 break; 2550 } 2551 } 2552 2553 dtp->dt_flow = data.dtpda_flow; 2554 dtp->dt_indent = data.dtpda_indent; 2555 dtp->dt_prefix = data.dtpda_prefix; 2556 2557 if ((drops = buf->dtbd_drops) == 0) 2558 return (0); 2559 2560 /* 2561 * Explicitly zero the drops to prevent us from processing them again. 2562 */ 2563 buf->dtbd_drops = 0; 2564 2565 return (dt_handle_cpudrop(dtp, cpu, DTRACEDROP_PRINCIPAL, drops)); 2566 } 2567 2568 /* 2569 * Reduce memory usage by shrinking the buffer if it's no more than half full. 2570 * Note, we need to preserve the alignment of the data at dtbd_oldest, which is 2571 * only 4-byte aligned. 2572 */ 2573 static void 2574 dt_realloc_buf(dtrace_hdl_t *dtp, dtrace_bufdesc_t *buf, int cursize) 2575 { 2576 uint64_t used = buf->dtbd_size - buf->dtbd_oldest; 2577 if (used < cursize / 2) { 2578 int misalign = buf->dtbd_oldest & (sizeof (uint64_t) - 1); 2579 char *newdata = dt_alloc(dtp, used + misalign); 2580 if (newdata == NULL) 2581 return; 2582 bzero(newdata, misalign); 2583 bcopy(buf->dtbd_data + buf->dtbd_oldest, 2584 newdata + misalign, used); 2585 dt_free(dtp, buf->dtbd_data); 2586 buf->dtbd_oldest = misalign; 2587 buf->dtbd_size = used + misalign; 2588 buf->dtbd_data = newdata; 2589 } 2590 } 2591 2592 /* 2593 * If the ring buffer has wrapped, the data is not in order. Rearrange it 2594 * so that it is. Note, we need to preserve the alignment of the data at 2595 * dtbd_oldest, which is only 4-byte aligned. 2596 */ 2597 static int 2598 dt_unring_buf(dtrace_hdl_t *dtp, dtrace_bufdesc_t *buf) 2599 { 2600 int misalign; 2601 char *newdata, *ndp; 2602 2603 if (buf->dtbd_oldest == 0) 2604 return (0); 2605 2606 misalign = buf->dtbd_oldest & (sizeof (uint64_t) - 1); 2607 newdata = ndp = dt_alloc(dtp, buf->dtbd_size + misalign); 2608 2609 if (newdata == NULL) 2610 return (-1); 2611 2612 assert(0 == (buf->dtbd_size & (sizeof (uint64_t) - 1))); 2613 2614 bzero(ndp, misalign); 2615 ndp += misalign; 2616 2617 bcopy(buf->dtbd_data + buf->dtbd_oldest, ndp, 2618 buf->dtbd_size - buf->dtbd_oldest); 2619 ndp += buf->dtbd_size - buf->dtbd_oldest; 2620 2621 bcopy(buf->dtbd_data, ndp, buf->dtbd_oldest); 2622 2623 dt_free(dtp, buf->dtbd_data); 2624 buf->dtbd_oldest = 0; 2625 buf->dtbd_data = newdata; 2626 buf->dtbd_size += misalign; 2627 2628 return (0); 2629 } 2630 2631 static void 2632 dt_put_buf(dtrace_hdl_t *dtp, dtrace_bufdesc_t *buf) 2633 { 2634 dt_free(dtp, buf->dtbd_data); 2635 dt_free(dtp, buf); 2636 } 2637 2638 /* 2639 * Returns 0 on success, in which case *cbp will be filled in if we retrieved 2640 * data, or NULL if there is no data for this CPU. 2641 * Returns -1 on failure and sets dt_errno. 2642 */ 2643 static int 2644 dt_get_buf(dtrace_hdl_t *dtp, int cpu, dtrace_bufdesc_t **bufp) 2645 { 2646 dtrace_optval_t size; 2647 dtrace_bufdesc_t *buf = dt_zalloc(dtp, sizeof (*buf)); 2648 int error, rval; 2649 2650 if (buf == NULL) 2651 return (-1); 2652 2653 (void) dtrace_getopt(dtp, "bufsize", &size); 2654 buf->dtbd_data = dt_alloc(dtp, size); 2655 if (buf->dtbd_data == NULL) { 2656 dt_free(dtp, buf); 2657 return (-1); 2658 } 2659 buf->dtbd_size = size; 2660 buf->dtbd_cpu = cpu; 2661 2662 #ifdef illumos 2663 if (dt_ioctl(dtp, DTRACEIOC_BUFSNAP, buf) == -1) { 2664 #else 2665 if (dt_ioctl(dtp, DTRACEIOC_BUFSNAP, &buf) == -1) { 2666 #endif 2667 /* 2668 * If we failed with ENOENT, it may be because the 2669 * CPU was unconfigured -- this is okay. Any other 2670 * error, however, is unexpected. 2671 */ 2672 if (errno == ENOENT) { 2673 *bufp = NULL; 2674 rval = 0; 2675 } else 2676 rval = dt_set_errno(dtp, errno); 2677 2678 dt_put_buf(dtp, buf); 2679 return (rval); 2680 } 2681 2682 error = dt_unring_buf(dtp, buf); 2683 if (error != 0) { 2684 dt_put_buf(dtp, buf); 2685 return (error); 2686 } 2687 dt_realloc_buf(dtp, buf, size); 2688 2689 *bufp = buf; 2690 return (0); 2691 } 2692 2693 typedef struct dt_begin { 2694 dtrace_consume_probe_f *dtbgn_probefunc; 2695 dtrace_consume_rec_f *dtbgn_recfunc; 2696 void *dtbgn_arg; 2697 dtrace_handle_err_f *dtbgn_errhdlr; 2698 void *dtbgn_errarg; 2699 int dtbgn_beginonly; 2700 } dt_begin_t; 2701 2702 static int 2703 dt_consume_begin_probe(const dtrace_probedata_t *data, void *arg) 2704 { 2705 dt_begin_t *begin = arg; 2706 dtrace_probedesc_t *pd = data->dtpda_pdesc; 2707 2708 int r1 = (strcmp(pd->dtpd_provider, "dtrace") == 0); 2709 int r2 = (strcmp(pd->dtpd_name, "BEGIN") == 0); 2710 2711 if (begin->dtbgn_beginonly) { 2712 if (!(r1 && r2)) 2713 return (DTRACE_CONSUME_NEXT); 2714 } else { 2715 if (r1 && r2) 2716 return (DTRACE_CONSUME_NEXT); 2717 } 2718 2719 /* 2720 * We have a record that we're interested in. Now call the underlying 2721 * probe function... 2722 */ 2723 return (begin->dtbgn_probefunc(data, begin->dtbgn_arg)); 2724 } 2725 2726 static int 2727 dt_consume_begin_record(const dtrace_probedata_t *data, 2728 const dtrace_recdesc_t *rec, void *arg) 2729 { 2730 dt_begin_t *begin = arg; 2731 2732 return (begin->dtbgn_recfunc(data, rec, begin->dtbgn_arg)); 2733 } 2734 2735 static int 2736 dt_consume_begin_error(const dtrace_errdata_t *data, void *arg) 2737 { 2738 dt_begin_t *begin = (dt_begin_t *)arg; 2739 dtrace_probedesc_t *pd = data->dteda_pdesc; 2740 2741 int r1 = (strcmp(pd->dtpd_provider, "dtrace") == 0); 2742 int r2 = (strcmp(pd->dtpd_name, "BEGIN") == 0); 2743 2744 if (begin->dtbgn_beginonly) { 2745 if (!(r1 && r2)) 2746 return (DTRACE_HANDLE_OK); 2747 } else { 2748 if (r1 && r2) 2749 return (DTRACE_HANDLE_OK); 2750 } 2751 2752 return (begin->dtbgn_errhdlr(data, begin->dtbgn_errarg)); 2753 } 2754 2755 static int 2756 dt_consume_begin(dtrace_hdl_t *dtp, FILE *fp, 2757 dtrace_consume_probe_f *pf, dtrace_consume_rec_f *rf, void *arg) 2758 { 2759 /* 2760 * There's this idea that the BEGIN probe should be processed before 2761 * everything else, and that the END probe should be processed after 2762 * anything else. In the common case, this is pretty easy to deal 2763 * with. However, a situation may arise where the BEGIN enabling and 2764 * END enabling are on the same CPU, and some enabling in the middle 2765 * occurred on a different CPU. To deal with this (blech!) we need to 2766 * consume the BEGIN buffer up until the end of the BEGIN probe, and 2767 * then set it aside. We will then process every other CPU, and then 2768 * we'll return to the BEGIN CPU and process the rest of the data 2769 * (which will inevitably include the END probe, if any). Making this 2770 * even more complicated (!) is the library's ERROR enabling. Because 2771 * this enabling is processed before we even get into the consume call 2772 * back, any ERROR firing would result in the library's ERROR enabling 2773 * being processed twice -- once in our first pass (for BEGIN probes), 2774 * and again in our second pass (for everything but BEGIN probes). To 2775 * deal with this, we interpose on the ERROR handler to assure that we 2776 * only process ERROR enablings induced by BEGIN enablings in the 2777 * first pass, and that we only process ERROR enablings _not_ induced 2778 * by BEGIN enablings in the second pass. 2779 */ 2780 2781 dt_begin_t begin; 2782 processorid_t cpu = dtp->dt_beganon; 2783 int rval, i; 2784 static int max_ncpus; 2785 dtrace_bufdesc_t *buf; 2786 2787 dtp->dt_beganon = -1; 2788 2789 if (dt_get_buf(dtp, cpu, &buf) != 0) 2790 return (-1); 2791 if (buf == NULL) 2792 return (0); 2793 2794 if (!dtp->dt_stopped || buf->dtbd_cpu != dtp->dt_endedon) { 2795 /* 2796 * This is the simple case. We're either not stopped, or if 2797 * we are, we actually processed any END probes on another 2798 * CPU. We can simply consume this buffer and return. 2799 */ 2800 rval = dt_consume_cpu(dtp, fp, cpu, buf, B_FALSE, 2801 pf, rf, arg); 2802 dt_put_buf(dtp, buf); 2803 return (rval); 2804 } 2805 2806 begin.dtbgn_probefunc = pf; 2807 begin.dtbgn_recfunc = rf; 2808 begin.dtbgn_arg = arg; 2809 begin.dtbgn_beginonly = 1; 2810 2811 /* 2812 * We need to interpose on the ERROR handler to be sure that we 2813 * only process ERRORs induced by BEGIN. 2814 */ 2815 begin.dtbgn_errhdlr = dtp->dt_errhdlr; 2816 begin.dtbgn_errarg = dtp->dt_errarg; 2817 dtp->dt_errhdlr = dt_consume_begin_error; 2818 dtp->dt_errarg = &begin; 2819 2820 rval = dt_consume_cpu(dtp, fp, cpu, buf, B_FALSE, 2821 dt_consume_begin_probe, dt_consume_begin_record, &begin); 2822 2823 dtp->dt_errhdlr = begin.dtbgn_errhdlr; 2824 dtp->dt_errarg = begin.dtbgn_errarg; 2825 2826 if (rval != 0) { 2827 dt_put_buf(dtp, buf); 2828 return (rval); 2829 } 2830 2831 if (max_ncpus == 0) 2832 max_ncpus = dt_sysconf(dtp, _SC_CPUID_MAX) + 1; 2833 2834 for (i = 0; i < max_ncpus; i++) { 2835 dtrace_bufdesc_t *nbuf; 2836 if (i == cpu) 2837 continue; 2838 2839 if (dt_get_buf(dtp, i, &nbuf) != 0) { 2840 dt_put_buf(dtp, buf); 2841 return (-1); 2842 } 2843 if (nbuf == NULL) 2844 continue; 2845 2846 rval = dt_consume_cpu(dtp, fp, i, nbuf, B_FALSE, 2847 pf, rf, arg); 2848 dt_put_buf(dtp, nbuf); 2849 if (rval != 0) { 2850 dt_put_buf(dtp, buf); 2851 return (rval); 2852 } 2853 } 2854 2855 /* 2856 * Okay -- we're done with the other buffers. Now we want to 2857 * reconsume the first buffer -- but this time we're looking for 2858 * everything _but_ BEGIN. And of course, in order to only consume 2859 * those ERRORs _not_ associated with BEGIN, we need to reinstall our 2860 * ERROR interposition function... 2861 */ 2862 begin.dtbgn_beginonly = 0; 2863 2864 assert(begin.dtbgn_errhdlr == dtp->dt_errhdlr); 2865 assert(begin.dtbgn_errarg == dtp->dt_errarg); 2866 dtp->dt_errhdlr = dt_consume_begin_error; 2867 dtp->dt_errarg = &begin; 2868 2869 rval = dt_consume_cpu(dtp, fp, cpu, buf, B_FALSE, 2870 dt_consume_begin_probe, dt_consume_begin_record, &begin); 2871 2872 dtp->dt_errhdlr = begin.dtbgn_errhdlr; 2873 dtp->dt_errarg = begin.dtbgn_errarg; 2874 2875 return (rval); 2876 } 2877 2878 /* ARGSUSED */ 2879 static uint64_t 2880 dt_buf_oldest(void *elem, void *arg) 2881 { 2882 dtrace_bufdesc_t *buf = elem; 2883 size_t offs = buf->dtbd_oldest; 2884 2885 while (offs < buf->dtbd_size) { 2886 dtrace_rechdr_t *dtrh = 2887 /* LINTED - alignment */ 2888 (dtrace_rechdr_t *)(buf->dtbd_data + offs); 2889 if (dtrh->dtrh_epid == DTRACE_EPIDNONE) { 2890 offs += sizeof (dtrace_epid_t); 2891 } else { 2892 return (DTRACE_RECORD_LOAD_TIMESTAMP(dtrh)); 2893 } 2894 } 2895 2896 /* There are no records left; use the time the buffer was retrieved. */ 2897 return (buf->dtbd_timestamp); 2898 } 2899 2900 int 2901 dtrace_consume(dtrace_hdl_t *dtp, FILE *fp, 2902 dtrace_consume_probe_f *pf, dtrace_consume_rec_f *rf, void *arg) 2903 { 2904 dtrace_optval_t size; 2905 static int max_ncpus; 2906 int i, rval; 2907 dtrace_optval_t interval = dtp->dt_options[DTRACEOPT_SWITCHRATE]; 2908 hrtime_t now = gethrtime(); 2909 2910 if (dtp->dt_lastswitch != 0) { 2911 if (now - dtp->dt_lastswitch < interval) 2912 return (0); 2913 2914 dtp->dt_lastswitch += interval; 2915 } else { 2916 dtp->dt_lastswitch = now; 2917 } 2918 2919 if (!dtp->dt_active) 2920 return (dt_set_errno(dtp, EINVAL)); 2921 2922 if (max_ncpus == 0) 2923 max_ncpus = dt_sysconf(dtp, _SC_CPUID_MAX) + 1; 2924 2925 if (pf == NULL) 2926 pf = (dtrace_consume_probe_f *)dt_nullprobe; 2927 2928 if (rf == NULL) 2929 rf = (dtrace_consume_rec_f *)dt_nullrec; 2930 2931 if (dtp->dt_options[DTRACEOPT_TEMPORAL] == DTRACEOPT_UNSET) { 2932 /* 2933 * The output will not be in the order it was traced. Rather, 2934 * we will consume all of the data from each CPU's buffer in 2935 * turn. We apply special handling for the records from BEGIN 2936 * and END probes so that they are consumed first and last, 2937 * respectively. 2938 * 2939 * If we have just begun, we want to first process the CPU that 2940 * executed the BEGIN probe (if any). 2941 */ 2942 if (dtp->dt_active && dtp->dt_beganon != -1 && 2943 (rval = dt_consume_begin(dtp, fp, pf, rf, arg)) != 0) 2944 return (rval); 2945 2946 for (i = 0; i < max_ncpus; i++) { 2947 dtrace_bufdesc_t *buf; 2948 2949 /* 2950 * If we have stopped, we want to process the CPU on 2951 * which the END probe was processed only _after_ we 2952 * have processed everything else. 2953 */ 2954 if (dtp->dt_stopped && (i == dtp->dt_endedon)) 2955 continue; 2956 2957 if (dt_get_buf(dtp, i, &buf) != 0) 2958 return (-1); 2959 if (buf == NULL) 2960 continue; 2961 2962 dtp->dt_flow = 0; 2963 dtp->dt_indent = 0; 2964 dtp->dt_prefix = NULL; 2965 rval = dt_consume_cpu(dtp, fp, i, 2966 buf, B_FALSE, pf, rf, arg); 2967 dt_put_buf(dtp, buf); 2968 if (rval != 0) 2969 return (rval); 2970 } 2971 if (dtp->dt_stopped) { 2972 dtrace_bufdesc_t *buf; 2973 2974 if (dt_get_buf(dtp, dtp->dt_endedon, &buf) != 0) 2975 return (-1); 2976 if (buf == NULL) 2977 return (0); 2978 2979 rval = dt_consume_cpu(dtp, fp, dtp->dt_endedon, 2980 buf, B_FALSE, pf, rf, arg); 2981 dt_put_buf(dtp, buf); 2982 return (rval); 2983 } 2984 } else { 2985 /* 2986 * The output will be in the order it was traced (or for 2987 * speculations, when it was committed). We retrieve a buffer 2988 * from each CPU and put it into a priority queue, which sorts 2989 * based on the first entry in the buffer. This is sufficient 2990 * because entries within a buffer are already sorted. 2991 * 2992 * We then consume records one at a time, always consuming the 2993 * oldest record, as determined by the priority queue. When 2994 * we reach the end of the time covered by these buffers, 2995 * we need to stop and retrieve more records on the next pass. 2996 * The kernel tells us the time covered by each buffer, in 2997 * dtbd_timestamp. The first buffer's timestamp tells us the 2998 * time covered by all buffers, as subsequently retrieved 2999 * buffers will cover to a more recent time. 3000 */ 3001 3002 uint64_t *drops = alloca(max_ncpus * sizeof (uint64_t)); 3003 uint64_t first_timestamp = 0; 3004 uint_t cookie = 0; 3005 dtrace_bufdesc_t *buf; 3006 3007 bzero(drops, max_ncpus * sizeof (uint64_t)); 3008 3009 if (dtp->dt_bufq == NULL) { 3010 dtp->dt_bufq = dt_pq_init(dtp, max_ncpus * 2, 3011 dt_buf_oldest, NULL); 3012 if (dtp->dt_bufq == NULL) /* ENOMEM */ 3013 return (-1); 3014 } 3015 3016 /* Retrieve data from each CPU. */ 3017 (void) dtrace_getopt(dtp, "bufsize", &size); 3018 for (i = 0; i < max_ncpus; i++) { 3019 dtrace_bufdesc_t *buf; 3020 3021 if (dt_get_buf(dtp, i, &buf) != 0) 3022 return (-1); 3023 if (buf != NULL) { 3024 if (first_timestamp == 0) 3025 first_timestamp = buf->dtbd_timestamp; 3026 assert(buf->dtbd_timestamp >= first_timestamp); 3027 3028 dt_pq_insert(dtp->dt_bufq, buf); 3029 drops[i] = buf->dtbd_drops; 3030 buf->dtbd_drops = 0; 3031 } 3032 } 3033 3034 /* Consume records. */ 3035 for (;;) { 3036 dtrace_bufdesc_t *buf = dt_pq_pop(dtp->dt_bufq); 3037 uint64_t timestamp; 3038 3039 if (buf == NULL) 3040 break; 3041 3042 timestamp = dt_buf_oldest(buf, dtp); 3043 assert(timestamp >= dtp->dt_last_timestamp); 3044 dtp->dt_last_timestamp = timestamp; 3045 3046 if (timestamp == buf->dtbd_timestamp) { 3047 /* 3048 * We've reached the end of the time covered 3049 * by this buffer. If this is the oldest 3050 * buffer, we must do another pass 3051 * to retrieve more data. 3052 */ 3053 dt_put_buf(dtp, buf); 3054 if (timestamp == first_timestamp && 3055 !dtp->dt_stopped) 3056 break; 3057 continue; 3058 } 3059 3060 if ((rval = dt_consume_cpu(dtp, fp, 3061 buf->dtbd_cpu, buf, B_TRUE, pf, rf, arg)) != 0) 3062 return (rval); 3063 dt_pq_insert(dtp->dt_bufq, buf); 3064 } 3065 3066 /* Consume drops. */ 3067 for (i = 0; i < max_ncpus; i++) { 3068 if (drops[i] != 0) { 3069 int error = dt_handle_cpudrop(dtp, i, 3070 DTRACEDROP_PRINCIPAL, drops[i]); 3071 if (error != 0) 3072 return (error); 3073 } 3074 } 3075 3076 /* 3077 * Reduce memory usage by re-allocating smaller buffers 3078 * for the "remnants". 3079 */ 3080 while (buf = dt_pq_walk(dtp->dt_bufq, &cookie)) 3081 dt_realloc_buf(dtp, buf, buf->dtbd_size); 3082 } 3083 3084 return (0); 3085 } 3086