1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
23 * Use is subject to license terms.
24 */
25
26 /*
27 * Copyright (c) 2017, Joyent, Inc. All rights reserved.
28 * Copyright (c) 2012 by Delphix. All rights reserved.
29 */
30
31 #include <stdlib.h>
32 #include <strings.h>
33 #include <errno.h>
34 #include <unistd.h>
35 #include <limits.h>
36 #include <assert.h>
37 #include <ctype.h>
38 #include <alloca.h>
39 #include <dt_impl.h>
40 #include <dt_pq.h>
41
42 #define DT_MASK_LO 0x00000000FFFFFFFFULL
43
44 /*
45 * We declare this here because (1) we need it and (2) we want to avoid a
46 * dependency on libm in libdtrace.
47 */
48 static long double
49 dt_fabsl(long double x)
50 {
51 if (x < 0)
52 return (-x);
53
54 return (x);
55 }
56
57 static int
58 dt_ndigits(long long val)
59 {
60 int rval = 1;
61 long long cmp = 10;
62
63 if (val < 0) {
64 val = val == INT64_MIN ? INT64_MAX : -val;
65 rval++;
66 }
67
68 while (val > cmp && cmp > 0) {
69 rval++;
70 cmp *= 10;
71 }
72
73 return (rval < 4 ? 4 : rval);
74 }
75
76 /*
77 * 128-bit arithmetic functions needed to support the stddev() aggregating
78 * action.
79 */
80 static int
81 dt_gt_128(uint64_t *a, uint64_t *b)
82 {
83 return (a[1] > b[1] || (a[1] == b[1] && a[0] > b[0]));
84 }
85
86 static int
87 dt_ge_128(uint64_t *a, uint64_t *b)
88 {
89 return (a[1] > b[1] || (a[1] == b[1] && a[0] >= b[0]));
90 }
91
92 static int
93 dt_le_128(uint64_t *a, uint64_t *b)
94 {
95 return (a[1] < b[1] || (a[1] == b[1] && a[0] <= b[0]));
96 }
97
98 /*
99 * Shift the 128-bit value in a by b. If b is positive, shift left.
100 * If b is negative, shift right.
101 */
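/*
 * For example, shifting by 32 multiplies the 128-bit value by 2^32, while
 * shifting by -64 moves the high word into the low word and clears the
 * high word.
 */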
102 static void
103 dt_shift_128(uint64_t *a, int b)
104 {
105 uint64_t mask;
106
107 if (b == 0)
108 return;
109
110 if (b < 0) {
111 b = -b;
112 if (b >= 64) {
113 a[0] = a[1] >> (b - 64);
114 a[1] = 0;
115 } else {
116 a[0] >>= b;
117 mask = 1LL << (64 - b);
118 mask -= 1;
119 a[0] |= ((a[1] & mask) << (64 - b));
120 a[1] >>= b;
121 }
122 } else {
123 if (b >= 64) {
124 a[1] = a[0] << (b - 64);
125 a[0] = 0;
126 } else {
127 a[1] <<= b;
128 mask = a[0] >> (64 - b);
129 a[1] |= mask;
130 a[0] <<= b;
131 }
132 }
133 }
134
135 static int
136 dt_nbits_128(uint64_t *a)
137 {
138 int nbits = 0;
139 uint64_t tmp[2];
140 uint64_t zero[2] = { 0, 0 };
141
142 tmp[0] = a[0];
143 tmp[1] = a[1];
144
145 dt_shift_128(tmp, -1);
146 while (dt_gt_128(tmp, zero)) {
147 dt_shift_128(tmp, -1);
148 nbits++;
149 }
150
151 return (nbits);
152 }
153
154 static void
155 dt_subtract_128(uint64_t *minuend, uint64_t *subtrahend, uint64_t *difference)
156 {
157 uint64_t result[2];
158
159 result[0] = minuend[0] - subtrahend[0];
160 result[1] = minuend[1] - subtrahend[1] -
161 (minuend[0] < subtrahend[0] ? 1 : 0);
162
163 difference[0] = result[0];
164 difference[1] = result[1];
165 }
166
167 static void
168 dt_add_128(uint64_t *addend1, uint64_t *addend2, uint64_t *sum)
169 {
170 uint64_t result[2];
171
172 result[0] = addend1[0] + addend2[0];
173 result[1] = addend1[1] + addend2[1] +
174 (result[0] < addend1[0] || result[0] < addend2[0] ? 1 : 0);
175
176 sum[0] = result[0];
177 sum[1] = result[1];
178 }
179
180 /*
181 * The basic idea is to break the 2 64-bit values into 4 32-bit values,
182 * use native multiplication on those, and then re-combine into the
183 * resulting 128-bit value.
184 *
185 * (hi1 << 32 + lo1) * (hi2 << 32 + lo2) =
186 * hi1 * hi2 << 64 +
187 * hi1 * lo2 << 32 +
188 * hi2 * lo1 << 32 +
189 * lo1 * lo2
190 */
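/*
 * As a worked example, multiplying 0x100000002 (hi1 = 1, lo1 = 2) by
 * 0x300000004 (hi2 = 3, lo2 = 4) yields the partial products 3 << 64,
 * (1 * 4) << 32, (3 * 2) << 32 and 2 * 4, which combine to give
 * product[1] = 3 and product[0] = 0xa00000008.
 */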
191 static void
192 dt_multiply_128(uint64_t factor1, uint64_t factor2, uint64_t *product)
193 {
194 uint64_t hi1, hi2, lo1, lo2;
195 uint64_t tmp[2];
196
197 hi1 = factor1 >> 32;
198 hi2 = factor2 >> 32;
199
200 lo1 = factor1 & DT_MASK_LO;
201 lo2 = factor2 & DT_MASK_LO;
202
203 product[0] = lo1 * lo2;
204 product[1] = hi1 * hi2;
205
206 tmp[0] = hi1 * lo2;
207 tmp[1] = 0;
208 dt_shift_128(tmp, 32);
209 dt_add_128(product, tmp, product);
210
211 tmp[0] = hi2 * lo1;
212 tmp[1] = 0;
213 dt_shift_128(tmp, 32);
214 dt_add_128(product, tmp, product);
215 }
216
217 /*
218 * This is long-hand division.
219 *
220 * We initialize subtrahend by shifting divisor left as far as possible. We
221 * loop, comparing subtrahend to dividend: if subtrahend is smaller, we
222 * subtract and set the appropriate bit in the result. We then shift
223 * subtrahend right by one bit for the next comparison.
224 */
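/*
 * For instance, dividing the 128-bit value { 100, 0 } by 7 starts with
 * subtrahend = 7 shifted into the topmost bits, and the subtract-and-shift
 * loop produces the quotient { 14, 0 }, discarding the remainder of 2.
 */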
225 static void
226 dt_divide_128(uint64_t *dividend, uint64_t divisor, uint64_t *quotient)
227 {
228 uint64_t result[2] = { 0, 0 };
229 uint64_t remainder[2];
230 uint64_t subtrahend[2];
231 uint64_t divisor_128[2];
232 uint64_t mask[2] = { 1, 0 };
233 int log = 0;
234
235 assert(divisor != 0);
236
237 divisor_128[0] = divisor;
238 divisor_128[1] = 0;
239
240 remainder[0] = dividend[0];
241 remainder[1] = dividend[1];
242
243 subtrahend[0] = divisor;
244 subtrahend[1] = 0;
245
246 while (divisor > 0) {
247 log++;
248 divisor >>= 1;
249 }
250
251 dt_shift_128(subtrahend, 128 - log);
252 dt_shift_128(mask, 128 - log);
253
254 while (dt_ge_128(remainder, divisor_128)) {
255 if (dt_ge_128(remainder, subtrahend)) {
256 dt_subtract_128(remainder, subtrahend, remainder);
257 result[0] |= mask[0];
258 result[1] |= mask[1];
259 }
260
261 dt_shift_128(subtrahend, -1);
262 dt_shift_128(mask, -1);
263 }
264
265 quotient[0] = result[0];
266 quotient[1] = result[1];
267 }
268
269 /*
270 * This is the long-hand method of calculating a square root.
271 * The algorithm is as follows:
272 *
273 * 1. Group the digits by 2 from the right.
274 * 2. Over the leftmost group, find the largest single-digit number
275 * whose square is less than that group.
276 * 3. Subtract the result of the previous step (step 2 on the first pass,
277 * step 4 thereafter) and bring down the next two-digit group.
278 * 4. For the result R we have so far, find the largest single-digit number
279 * x such that 2 * R * 10 * x + x^2 is less than the result from step 3.
280 * (Note that this is doubling R and performing a decimal left-shift by 1
281 * and searching for the appropriate decimal to fill the one's place.)
282 * The value x is the next digit in the square root.
283 * Repeat steps 3 and 4 until the desired precision is reached. (We're
284 * dealing with integers, so the above is sufficient.)
285 *
286 * In decimal, the square root of 582,734 would be calculated as so:
287 *
288 * __7__6__3
289 * | 58 27 34
290 * -49 (7^2 == 49 => 7 is the first digit in the square root)
291 * --
292 * 9 27 (Subtract and bring down the next group.)
293 * 146 8 76 (2 * 7 * 10 * 6 + 6^2 == 876 => 6 is the next digit in
294 * ----- the square root)
295 * 51 34 (Subtract and bring down the next group.)
296 * 1523 45 69 (2 * 76 * 10 * 3 + 3^2 == 4569 => 3 is the next digit in
297 * ----- the square root)
298 * 5 65 (remainder)
299 *
300 * The above algorithm applies similarly in binary, but note that the
301 * only possible non-zero value for x in step 4 is 1, so step 4 becomes a
302 * simple decision: is 2 * R * 2 * 1 + 1^2 (aka R << 2 + 1) less than the
303 * preceding difference?
304 *
305 * In binary, the square root of 11011011 would be calculated as so:
306 *
307 * __1__1__1__0
308 * | 11 01 10 11
309 * 01 (0 << 2 + 1 == 1 < 11 => this bit is 1)
310 * --
311 * 10 01 10 11
312 * 101 1 01 (1 << 2 + 1 == 101 < 1001 => next bit is 1)
313 * -----
314 * 1 00 10 11
315 * 1101 11 01 (11 << 2 + 1 == 1101 < 10010 => next bit is 1)
316 * -------
317 * 1 01 11
318 * 11101 1 11 01 (111 << 2 + 1 == 11101 > 10111 => last bit is 0)
319 *
320 */
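/*
 * Because the input is at most 128 bits wide, its square root always fits
 * in 64 bits -- which is why the high word of the result is asserted to be
 * zero before it is returned.
 */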
321 static uint64_t
322 dt_sqrt_128(uint64_t *square)
323 {
324 uint64_t result[2] = { 0, 0 };
325 uint64_t diff[2] = { 0, 0 };
326 uint64_t one[2] = { 1, 0 };
327 uint64_t next_pair[2];
328 uint64_t next_try[2];
329 uint64_t bit_pairs, pair_shift;
330 int i;
331
332 bit_pairs = dt_nbits_128(square) / 2;
333 pair_shift = bit_pairs * 2;
334
335 for (i = 0; i <= bit_pairs; i++) {
336 /*
337 * Bring down the next pair of bits.
338 */
339 next_pair[0] = square[0];
340 next_pair[1] = square[1];
341 dt_shift_128(next_pair, -pair_shift);
342 next_pair[0] &= 0x3;
343 next_pair[1] = 0;
344
345 dt_shift_128(diff, 2);
346 dt_add_128(diff, next_pair, diff);
347
348 /*
349 * next_try = R << 2 + 1
350 */
351 next_try[0] = result[0];
352 next_try[1] = result[1];
353 dt_shift_128(next_try, 2);
354 dt_add_128(next_try, one, next_try);
355
356 if (dt_le_128(next_try, diff)) {
357 dt_subtract_128(diff, next_try, diff);
358 dt_shift_128(result, 1);
359 dt_add_128(result, one, result);
360 } else {
361 dt_shift_128(result, 1);
362 }
363
364 pair_shift -= 2;
365 }
366
367 assert(result[1] == 0);
368
369 return (result[0]);
370 }
371
372 uint64_t
373 dt_stddev(uint64_t *data, uint64_t normal)
374 {
375 uint64_t avg_of_squares[2];
376 uint64_t square_of_avg[2];
377 int64_t norm_avg;
378 uint64_t diff[2];
379
380 /*
381 * The standard approximation for standard deviation is
382 * sqrt(average(x**2) - average(x)**2), i.e. the square root
383 * of the average of the squares minus the square of the average.
384 * When normalizing, we should divide the sum of x**2 by normal**2.
385 */
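/*
 * The incoming data buffer is laid out as: data[0] holds the count of
 * values, data[1] the (signed) sum of the values, and data[2]/data[3]
 * the 128-bit sum of the squares -- which is why the divisions below
 * operate on data + 2.
 */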
386 dt_divide_128(data + 2, normal, avg_of_squares);
387 dt_divide_128(avg_of_squares, normal, avg_of_squares);
388 dt_divide_128(avg_of_squares, data[0], avg_of_squares);
389
390 norm_avg = (int64_t)data[1] / (int64_t)normal / (int64_t)data[0];
391
392 if (norm_avg < 0)
393 norm_avg = -norm_avg;
394
395 dt_multiply_128((uint64_t)norm_avg, (uint64_t)norm_avg, square_of_avg);
396
397 dt_subtract_128(avg_of_squares, square_of_avg, diff);
398
399 return (dt_sqrt_128(diff));
400 }
401
402 static int
403 dt_flowindent(dtrace_hdl_t *dtp, dtrace_probedata_t *data, dtrace_epid_t last,
404 dtrace_bufdesc_t *buf, size_t offs)
405 {
406 dtrace_probedesc_t *pd = data->dtpda_pdesc, *npd;
407 dtrace_eprobedesc_t *epd = data->dtpda_edesc, *nepd;
408 char *p = pd->dtpd_provider, *n = pd->dtpd_name, *sub;
409 dtrace_flowkind_t flow = DTRACEFLOW_NONE;
410 const char *str = NULL;
411 static const char *e_str[2] = { " -> ", " => " };
412 static const char *r_str[2] = { " <- ", " <= " };
413 static const char *ent = "entry", *ret = "return";
414 static int entlen = 0, retlen = 0;
415 dtrace_epid_t next, id = epd->dtepd_epid;
416 int rval;
417
418 if (entlen == 0) {
419 assert(retlen == 0);
420 entlen = strlen(ent);
421 retlen = strlen(ret);
422 }
423
424 /*
425 * If the name of the probe is "entry" or ends with "-entry", we
426 * treat it as an entry; if it is "return" or ends with "-return",
427 * we treat it as a return. (This allows application-provided probes
428 * like "method-entry" or "function-entry" to participate in flow
429 * indentation -- without accidentally misinterpreting popular probe
430 * names like "carpentry", "gentry" or "Coventry".)
431 */
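/*
 * For example, the pid provider's "entry" probes and an application probe
 * named "request-entry" both participate, while a probe named "reentry"
 * does not, because the character preceding the match is neither a '-'
 * nor the start of the name.
 */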
432 if ((sub = strstr(n, ent)) != NULL && sub[entlen] == '\0' &&
433 (sub == n || sub[-1] == '-')) {
434 flow = DTRACEFLOW_ENTRY;
435 str = e_str[strcmp(p, "syscall") == 0];
436 } else if ((sub = strstr(n, ret)) != NULL && sub[retlen] == '\0' &&
437 (sub == n || sub[-1] == '-')) {
438 flow = DTRACEFLOW_RETURN;
439 str = r_str[strcmp(p, "syscall") == 0];
440 }
441
442 /*
443 * If we're going to indent this, we need to check the ID of our last
444 * call. If we're looking at the same probe ID but a different EPID,
445 * we _don't_ want to indent. (Yes, there are some minor holes in
446 * this scheme -- it's a heuristic.)
447 */
448 if (flow == DTRACEFLOW_ENTRY) {
449 if ((last != DTRACE_EPIDNONE && id != last &&
450 pd->dtpd_id == dtp->dt_pdesc[last]->dtpd_id))
451 flow = DTRACEFLOW_NONE;
452 }
453
454 /*
455 * If we're going to unindent this, it's more difficult to see if
456 * we don't actually want to unindent it -- we need to look at the
457 * _next_ EPID.
458 */
459 if (flow == DTRACEFLOW_RETURN) {
460 offs += epd->dtepd_size;
461
462 do {
463 if (offs >= buf->dtbd_size)
464 goto out;
465
466 next = *(uint32_t *)((uintptr_t)buf->dtbd_data + offs);
467
468 if (next == DTRACE_EPIDNONE)
469 offs += sizeof (id);
470 } while (next == DTRACE_EPIDNONE);
471
472 if ((rval = dt_epid_lookup(dtp, next, &nepd, &npd)) != 0)
473 return (rval);
474
475 if (next != id && npd->dtpd_id == pd->dtpd_id)
476 flow = DTRACEFLOW_NONE;
477 }
478
479 out:
480 if (flow == DTRACEFLOW_ENTRY || flow == DTRACEFLOW_RETURN) {
481 data->dtpda_prefix = str;
482 } else {
483 data->dtpda_prefix = "| ";
484 }
485
486 if (flow == DTRACEFLOW_RETURN && data->dtpda_indent > 0)
487 data->dtpda_indent -= 2;
488
489 data->dtpda_flow = flow;
490
491 return (0);
492 }
493
494 static int
495 dt_nullprobe()
496 {
497 return (DTRACE_CONSUME_THIS);
498 }
499
500 static int
501 dt_nullrec()
502 {
503 return (DTRACE_CONSUME_NEXT);
504 }
505
506 static void
507 dt_quantize_total(dtrace_hdl_t *dtp, int64_t datum, long double *total)
508 {
509 long double val = dt_fabsl((long double)datum);
510
511 if (dtp->dt_options[DTRACEOPT_AGGZOOM] == DTRACEOPT_UNSET) {
512 *total += val;
513 return;
514 }
515
516 /*
517 * If we're zooming in on an aggregation, we want the height of the
518 * highest value to be approximately 95% of total bar height -- so we
519 * adjust up by the reciprocal of DTRACE_AGGZOOM_MAX when comparing to
520 * our highest value.
521 */
522 val *= 1 / DTRACE_AGGZOOM_MAX;
523
524 if (*total < val)
525 *total = val;
526 }
527
528 static int
529 dt_print_quanthdr(dtrace_hdl_t *dtp, FILE *fp, int width)
530 {
531 return (dt_printf(dtp, fp, "\n%*s %41s %-9s\n",
532 width ? width : 16, width ? "key" : "value",
533 "------------- Distribution -------------", "count"));
534 }
535
536 static int
537 dt_print_quanthdr_packed(dtrace_hdl_t *dtp, FILE *fp, int width,
538 const dtrace_aggdata_t *aggdata, dtrace_actkind_t action)
539 {
540 int min = aggdata->dtada_minbin, max = aggdata->dtada_maxbin;
541 int minwidth, maxwidth, i;
542
543 assert(action == DTRACEAGG_QUANTIZE || action == DTRACEAGG_LQUANTIZE);
544
545 if (action == DTRACEAGG_QUANTIZE) {
546 if (min != 0 && min != DTRACE_QUANTIZE_ZEROBUCKET)
547 min--;
548
549 if (max < DTRACE_QUANTIZE_NBUCKETS - 1)
550 max++;
551
552 minwidth = dt_ndigits(DTRACE_QUANTIZE_BUCKETVAL(min));
553 maxwidth = dt_ndigits(DTRACE_QUANTIZE_BUCKETVAL(max));
554 } else {
555 maxwidth = 8;
556 minwidth = maxwidth - 1;
557 max++;
558 }
559
560 if (dt_printf(dtp, fp, "\n%*s %*s .",
561 width, width > 0 ? "key" : "", minwidth, "min") < 0)
562 return (-1);
563
564 for (i = min; i <= max; i++) {
565 if (dt_printf(dtp, fp, "-") < 0)
566 return (-1);
567 }
568
569 return (dt_printf(dtp, fp, ". %*s | count\n", -maxwidth, "max"));
570 }
571
572 /*
573 * We use a subset of the Unicode Block Elements (U+2588 through U+258F,
574 * inclusive) to represent aggregations via UTF-8 -- which are expressed via
575 * 3-byte UTF-8 sequences.
576 */
577 #define DTRACE_AGGUTF8_FULL 0x2588
578 #define DTRACE_AGGUTF8_BASE 0x258f
579 #define DTRACE_AGGUTF8_LEVELS 8
580
581 #define DTRACE_AGGUTF8_BYTE0(val) (0xe0 | ((val) >> 12))
582 #define DTRACE_AGGUTF8_BYTE1(val) (0x80 | (((val) >> 6) & 0x3f))
583 #define DTRACE_AGGUTF8_BYTE2(val) (0x80 | ((val) & 0x3f))
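/*
 * For example, the full block character U+2588 encodes as the UTF-8
 * sequence 0xe2 0x96 0x88: DTRACE_AGGUTF8_BYTE0(0x2588) == 0xe2,
 * DTRACE_AGGUTF8_BYTE1(0x2588) == 0x96 and DTRACE_AGGUTF8_BYTE2(0x2588)
 * == 0x88.
 */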
584
585 static int
586 dt_print_quantline_utf8(dtrace_hdl_t *dtp, FILE *fp, int64_t val,
587 uint64_t normal, long double total)
588 {
589 uint_t len = 40, i, whole, partial;
590 long double f = (dt_fabsl((long double)val) * len) / total;
591 const char *spaces = "                                        ";  /* 40 spaces */
592
593 whole = (uint_t)f;
594 partial = (uint_t)((f - (long double)(uint_t)f) *
595 (long double)DTRACE_AGGUTF8_LEVELS);
596
597 if (dt_printf(dtp, fp, "|") < 0)
598 return (-1);
599
600 for (i = 0; i < whole; i++) {
601 if (dt_printf(dtp, fp, "%c%c%c",
602 DTRACE_AGGUTF8_BYTE0(DTRACE_AGGUTF8_FULL),
603 DTRACE_AGGUTF8_BYTE1(DTRACE_AGGUTF8_FULL),
604 DTRACE_AGGUTF8_BYTE2(DTRACE_AGGUTF8_FULL)) < 0)
605 return (-1);
606 }
607
608 if (partial != 0) {
609 partial = DTRACE_AGGUTF8_BASE - (partial - 1);
610
611 if (dt_printf(dtp, fp, "%c%c%c",
612 DTRACE_AGGUTF8_BYTE0(partial),
613 DTRACE_AGGUTF8_BYTE1(partial),
614 DTRACE_AGGUTF8_BYTE2(partial)) < 0)
615 return (-1);
616
617 i++;
618 }
619
620 return (dt_printf(dtp, fp, "%s %-9lld\n", spaces + i,
621 (long long)val / normal));
622 }
623
624 static int
625 dt_print_quantline(dtrace_hdl_t *dtp, FILE *fp, int64_t val,
626 uint64_t normal, long double total, char positives, char negatives)
627 {
628 long double f;
629 uint_t depth, len = 40;
630
631 const char *ats = "@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@";
632 const char *spaces = "                                        ";  /* 40 spaces */
633
634 assert(strlen(ats) == len && strlen(spaces) == len);
635 assert(!(total == 0 && (positives || negatives)));
636 assert(!(val < 0 && !negatives));
637 assert(!(val > 0 && !positives));
638 assert(!(val != 0 && total == 0));
639
640 if (!negatives) {
641 if (positives) {
642 if (dtp->dt_encoding == DT_ENCODING_UTF8) {
643 return (dt_print_quantline_utf8(dtp, fp, val,
644 normal, total));
645 }
646
647 f = (dt_fabsl((long double)val) * len) / total;
648 depth = (uint_t)(f + 0.5);
649 } else {
650 depth = 0;
651 }
652
653 return (dt_printf(dtp, fp, "|%s%s %-9lld\n", ats + len - depth,
654 spaces + depth, (long long)val / normal));
655 }
656
657 if (!positives) {
658 f = (dt_fabsl((long double)val) * len) / total;
659 depth = (uint_t)(f + 0.5);
660
661 return (dt_printf(dtp, fp, "%s%s| %-9lld\n", spaces + depth,
662 ats + len - depth, (long long)val / normal));
663 }
664
665 /*
666 * If we're here, we have both positive and negative bucket values.
667 * To express this graphically, we're going to generate both positive
668 * and negative bars separated by a centerline. These bars are half
669 * the size of normal quantize()/lquantize() bars, so we divide the
670 * length in half before calculating the bar length.
671 */
672 len /= 2;
673 ats = &ats[len];
674 spaces = &spaces[len];
675
676 f = (dt_fabsl((long double)val) * len) / total;
677 depth = (uint_t)(f + 0.5);
678
679 if (val <= 0) {
680 return (dt_printf(dtp, fp, "%s%s|%*s %-9lld\n", spaces + depth,
681 ats + len - depth, len, "", (long long)val / normal));
682 } else {
683 return (dt_printf(dtp, fp, "%20s|%s%s %-9lld\n", "",
684 ats + len - depth, spaces + depth,
685 (long long)val / normal));
686 }
687 }
688
689 /*
690 * As with UTF-8 printing of aggregations, we use a subset of the Unicode
691 * Block Elements (U+2581 through U+2588, inclusive) to represent our packed
692 * aggregation.
693 */
694 #define DTRACE_AGGPACK_BASE 0x2581
695 #define DTRACE_AGGPACK_LEVELS 8
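/*
 * In dt_print_packed() below, a zero datum prints as a blank, a negative
 * datum is mapped onto the "vvvvVV" ramp, and a positive datum is scaled
 * against the aggregation total to select one of the eight block glyphs
 * (or one of the "__xxxxXX" characters when UTF-8 is not in use).
 */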
696
697 static int
698 dt_print_packed(dtrace_hdl_t *dtp, FILE *fp,
699 long double datum, long double total)
700 {
701 static boolean_t utf8_checked = B_FALSE;
702 static boolean_t utf8;
703 char *ascii = "__xxxxXX";
704 char *neg = "vvvvVV";
705 unsigned int len;
706 long double val;
707
708 if (!utf8_checked) {
709 char *term;
710
711 /*
712 * We want to determine if we can reasonably emit UTF-8 for our
713 * packed aggregation. To do this, we check for terminals that are
714 * known to be too primitive to display UTF-8, and use ASCII on those.
715 */
716 utf8_checked = B_TRUE;
717
718 if (dtp->dt_encoding == DT_ENCODING_ASCII) {
719 utf8 = B_FALSE;
720 } else if (dtp->dt_encoding == DT_ENCODING_UTF8) {
721 utf8 = B_TRUE;
722 } else if ((term = getenv("TERM")) != NULL &&
723 (strcmp(term, "sun") == 0 ||
724 strcmp(term, "sun-color") == 0 ||
725 strcmp(term, "dumb") == 0)) {
726 utf8 = B_FALSE;
727 } else {
728 utf8 = B_TRUE;
729 }
730 }
731
732 if (datum == 0)
733 return (dt_printf(dtp, fp, " "));
734
735 if (datum < 0) {
736 len = strlen(neg);
737 val = dt_fabsl(datum * (len - 1)) / total;
738 return (dt_printf(dtp, fp, "%c", neg[(uint_t)(val + 0.5)]));
739 }
740
741 if (utf8) {
742 int block = DTRACE_AGGPACK_BASE + (unsigned int)(((datum *
743 (DTRACE_AGGPACK_LEVELS - 1)) / total) + 0.5);
744
745 return (dt_printf(dtp, fp, "%c%c%c",
746 DTRACE_AGGUTF8_BYTE0(block),
747 DTRACE_AGGUTF8_BYTE1(block),
748 DTRACE_AGGUTF8_BYTE2(block)));
749 }
750
751 len = strlen(ascii);
752 val = (datum * (len - 1)) / total;
753 return (dt_printf(dtp, fp, "%c", ascii[(uint_t)(val + 0.5)]));
754 }
755
756 int
757 dt_print_quantize(dtrace_hdl_t *dtp, FILE *fp, const void *addr,
758 size_t size, uint64_t normal)
759 {
760 const int64_t *data = addr;
761 int i, first_bin = 0, last_bin = DTRACE_QUANTIZE_NBUCKETS - 1;
762 long double total = 0;
763 char positives = 0, negatives = 0;
764
765 if (size != DTRACE_QUANTIZE_NBUCKETS * sizeof (uint64_t))
766 return (dt_set_errno(dtp, EDT_DMISMATCH));
767
768 while (first_bin < DTRACE_QUANTIZE_NBUCKETS - 1 && data[first_bin] == 0)
769 first_bin++;
770
771 if (first_bin == DTRACE_QUANTIZE_NBUCKETS - 1) {
772 /*
773 * There isn't any data. This is possible if the aggregation
774 * has been clear()'d or if negative increment values have been
775 * used. Regardless, we'll print the buckets around 0.
776 */
777 first_bin = DTRACE_QUANTIZE_ZEROBUCKET - 1;
778 last_bin = DTRACE_QUANTIZE_ZEROBUCKET + 1;
779 } else {
780 if (first_bin > 0)
781 first_bin--;
782
783 while (last_bin > 0 && data[last_bin] == 0)
784 last_bin--;
785
786 if (last_bin < DTRACE_QUANTIZE_NBUCKETS - 1)
787 last_bin++;
788 }
789
790 for (i = first_bin; i <= last_bin; i++) {
791 positives |= (data[i] > 0);
792 negatives |= (data[i] < 0);
793 dt_quantize_total(dtp, data[i], &total);
794 }
795
796 if (dt_print_quanthdr(dtp, fp, 0) < 0)
797 return (-1);
798
799 for (i = first_bin; i <= last_bin; i++) {
800 if (dt_printf(dtp, fp, "%16lld ",
801 (long long)DTRACE_QUANTIZE_BUCKETVAL(i)) < 0)
802 return (-1);
803
804 if (dt_print_quantline(dtp, fp, data[i], normal, total,
805 positives, negatives) < 0)
806 return (-1);
807 }
808
809 return (0);
810 }
811
812 int
813 dt_print_quantize_packed(dtrace_hdl_t *dtp, FILE *fp, const void *addr,
814 size_t size, const dtrace_aggdata_t *aggdata)
815 {
816 const int64_t *data = addr;
817 long double total = 0, count = 0;
818 int min = aggdata->dtada_minbin, max = aggdata->dtada_maxbin, i;
819 int64_t minval, maxval;
820
821 if (size != DTRACE_QUANTIZE_NBUCKETS * sizeof (uint64_t))
822 return (dt_set_errno(dtp, EDT_DMISMATCH));
823
824 if (min != 0 && min != DTRACE_QUANTIZE_ZEROBUCKET)
825 min--;
826
827 if (max < DTRACE_QUANTIZE_NBUCKETS - 1)
828 max++;
829
830 minval = DTRACE_QUANTIZE_BUCKETVAL(min);
831 maxval = DTRACE_QUANTIZE_BUCKETVAL(max);
832
833 if (dt_printf(dtp, fp, " %*lld :", dt_ndigits(minval),
834 (long long)minval) < 0)
835 return (-1);
836
837 for (i = min; i <= max; i++) {
838 dt_quantize_total(dtp, data[i], &total);
839 count += data[i];
840 }
841
842 for (i = min; i <= max; i++) {
843 if (dt_print_packed(dtp, fp, data[i], total) < 0)
844 return (-1);
845 }
846
847 if (dt_printf(dtp, fp, ": %*lld | %lld\n",
848 -dt_ndigits(maxval), (long long)maxval, (long long)count) < 0)
849 return (-1);
850
851 return (0);
852 }
853
854 int
855 dt_print_lquantize(dtrace_hdl_t *dtp, FILE *fp, const void *addr,
856 size_t size, uint64_t normal)
857 {
858 const int64_t *data = addr;
859 int i, first_bin, last_bin, base;
860 uint64_t arg;
861 long double total = 0;
862 uint16_t step, levels;
863 char positives = 0, negatives = 0;
864
865 if (size < sizeof (uint64_t))
866 return (dt_set_errno(dtp, EDT_DMISMATCH));
867
868 arg = *data++;
869 size -= sizeof (uint64_t);
870
871 base = DTRACE_LQUANTIZE_BASE(arg);
872 step = DTRACE_LQUANTIZE_STEP(arg);
873 levels = DTRACE_LQUANTIZE_LEVELS(arg);
874
875 first_bin = 0;
876 last_bin = levels + 1;
877
878 if (size != sizeof (uint64_t) * (levels + 2))
879 return (dt_set_errno(dtp, EDT_DMISMATCH));
880
881 while (first_bin <= levels + 1 && data[first_bin] == 0)
882 first_bin++;
883
884 if (first_bin > levels + 1) {
885 first_bin = 0;
886 last_bin = 2;
887 } else {
888 if (first_bin > 0)
889 first_bin--;
890
891 while (last_bin > 0 && data[last_bin] == 0)
892 last_bin--;
893
894 if (last_bin < levels + 1)
895 last_bin++;
896 }
897
898 for (i = first_bin; i <= last_bin; i++) {
899 positives |= (data[i] > 0);
900 negatives |= (data[i] < 0);
901 dt_quantize_total(dtp, data[i], &total);
902 }
903
904 if (dt_printf(dtp, fp, "\n%16s %41s %-9s\n", "value",
905 "------------- Distribution -------------", "count") < 0)
906 return (-1);
907
908 for (i = first_bin; i <= last_bin; i++) {
909 char c[32];
910 int err;
911
912 if (i == 0) {
913 (void) snprintf(c, sizeof (c), "< %d", base);
914 err = dt_printf(dtp, fp, "%16s ", c);
915 } else if (i == levels + 1) {
916 (void) snprintf(c, sizeof (c), ">= %d",
917 base + (levels * step));
918 err = dt_printf(dtp, fp, "%16s ", c);
919 } else {
920 err = dt_printf(dtp, fp, "%16d ",
921 base + (i - 1) * step);
922 }
923
924 if (err < 0 || dt_print_quantline(dtp, fp, data[i], normal,
925 total, positives, negatives) < 0)
926 return (-1);
927 }
928
929 return (0);
930 }
931
932 /*ARGSUSED*/
933 int
934 dt_print_lquantize_packed(dtrace_hdl_t *dtp, FILE *fp, const void *addr,
935 size_t size, const dtrace_aggdata_t *aggdata)
936 {
937 const int64_t *data = addr;
938 long double total = 0, count = 0;
939 int min, max, base, err;
940 uint64_t arg;
941 uint16_t step, levels;
942 char c[32];
943 unsigned int i;
944
945 if (size < sizeof (uint64_t))
946 return (dt_set_errno(dtp, EDT_DMISMATCH));
947
948 arg = *data++;
949 size -= sizeof (uint64_t);
950
951 base = DTRACE_LQUANTIZE_BASE(arg);
952 step = DTRACE_LQUANTIZE_STEP(arg);
953 levels = DTRACE_LQUANTIZE_LEVELS(arg);
954
955 if (size != sizeof (uint64_t) * (levels + 2))
956 return (dt_set_errno(dtp, EDT_DMISMATCH));
957
958 min = 0;
959 max = levels + 1;
960
961 if (min == 0) {
962 (void) snprintf(c, sizeof (c), "< %d", base);
963 err = dt_printf(dtp, fp, "%8s :", c);
964 } else {
965 err = dt_printf(dtp, fp, "%8d :", base + (min - 1) * step);
966 }
967
968 if (err < 0)
969 return (-1);
970
971 for (i = min; i <= max; i++) {
972 dt_quantize_total(dtp, data[i], &total);
973 count += data[i];
974 }
975
976 for (i = min; i <= max; i++) {
977 if (dt_print_packed(dtp, fp, data[i], total) < 0)
978 return (-1);
979 }
980
981 (void) snprintf(c, sizeof (c), ">= %d", base + (levels * step));
982 return (dt_printf(dtp, fp, ": %-8s | %lld\n", c, (long long)count));
983 }
984
985 int
986 dt_print_llquantize(dtrace_hdl_t *dtp, FILE *fp, const void *addr,
987 size_t size, uint64_t normal)
988 {
989 int i, first_bin, last_bin, bin = 1, order, levels;
990 uint16_t factor, low, high, nsteps;
991 const int64_t *data = addr;
992 int64_t value = 1, next, step;
993 char positives = 0, negatives = 0;
994 long double total = 0;
995 uint64_t arg;
996 char c[32];
997
998 if (size < sizeof (uint64_t))
999 return (dt_set_errno(dtp, EDT_DMISMATCH));
1000
1001 arg = *data++;
1002 size -= sizeof (uint64_t);
1003
1004 factor = DTRACE_LLQUANTIZE_FACTOR(arg);
1005 low = DTRACE_LLQUANTIZE_LOW(arg);
1006 high = DTRACE_LLQUANTIZE_HIGH(arg);
1007 nsteps = DTRACE_LLQUANTIZE_NSTEP(arg);
1008
1009 /*
1010 * We don't expect to be handed invalid llquantize() parameters here,
1011 * but sanity check them (to a degree) nonetheless.
1012 */
1013 if (size > INT32_MAX || factor < 2 || low >= high ||
1014 nsteps == 0 || factor > nsteps)
1015 return (dt_set_errno(dtp, EDT_DMISMATCH));
1016
1017 levels = (int)size / sizeof (uint64_t);
1018
1019 first_bin = 0;
1020 last_bin = levels - 1;
1021
1022 while (first_bin < levels && data[first_bin] == 0)
1023 first_bin++;
1024
1025 if (first_bin == levels) {
1026 first_bin = 0;
1027 last_bin = 1;
1028 } else {
1029 if (first_bin > 0)
1030 first_bin--;
1031
1032 while (last_bin > 0 && data[last_bin] == 0)
1033 last_bin--;
1034
1035 if (last_bin < levels - 1)
1036 last_bin++;
1037 }
1038
1039 for (i = first_bin; i <= last_bin; i++) {
1040 positives |= (data[i] > 0);
1041 negatives |= (data[i] < 0);
1042 dt_quantize_total(dtp, data[i], &total);
1043 }
1044
1045 if (dt_printf(dtp, fp, "\n%16s %41s %-9s\n", "value",
1046 "------------- Distribution -------------", "count") < 0)
1047 return (-1);
1048
1049 for (order = 0; order < low; order++)
1050 value *= factor;
1051
1052 next = value * factor;
1053 step = next > nsteps ? next / nsteps : 1;
1054
1055 if (first_bin == 0) {
1056 (void) snprintf(c, sizeof (c), "< %lld", value);
1057
1058 if (dt_printf(dtp, fp, "%16s ", c) < 0)
1059 return (-1);
1060
1061 if (dt_print_quantline(dtp, fp, data[0], normal,
1062 total, positives, negatives) < 0)
1063 return (-1);
1064 }
1065
1066 while (order <= high) {
1067 if (bin >= first_bin && bin <= last_bin) {
1068 if (dt_printf(dtp, fp, "%16lld ", (long long)value) < 0)
1069 return (-1);
1070
1071 if (dt_print_quantline(dtp, fp, data[bin],
1072 normal, total, positives, negatives) < 0)
1073 return (-1);
1074 }
1075
1076 assert(value < next);
1077 bin++;
1078
1079 if ((value += step) != next)
1080 continue;
1081
1082 next = value * factor;
1083 step = next > nsteps ? next / nsteps : 1;
1084 order++;
1085 }
1086
1087 if (last_bin < bin)
1088 return (0);
1089
1090 assert(last_bin == bin);
1091 (void) snprintf(c, sizeof (c), ">= %lld", value);
1092
1093 if (dt_printf(dtp, fp, "%16s ", c) < 0)
1094 return (-1);
1095
1096 return (dt_print_quantline(dtp, fp, data[bin], normal,
1097 total, positives, negatives));
1098 }
1099
1100 /*ARGSUSED*/
1101 static int
1102 dt_print_average(dtrace_hdl_t *dtp, FILE *fp, caddr_t addr,
1103 size_t size, uint64_t normal)
1104 {
1105 /* LINTED - alignment */
1106 int64_t *data = (int64_t *)addr;
1107
1108 return (dt_printf(dtp, fp, " %16lld", data[0] ?
1109 (long long)(data[1] / (int64_t)normal / data[0]) : 0));
1110 }
1111
1112 /*ARGSUSED*/
1113 static int
1114 dt_print_stddev(dtrace_hdl_t *dtp, FILE *fp, caddr_t addr,
1115 size_t size, uint64_t normal)
1116 {
1117 /* LINTED - alignment */
1118 uint64_t *data = (uint64_t *)addr;
1119
1120 return (dt_printf(dtp, fp, " %16llu", data[0] ?
1121 (unsigned long long) dt_stddev(data, normal) : 0));
1122 }
1123
1124 /*ARGSUSED*/
1125 static int
1126 dt_print_bytes(dtrace_hdl_t *dtp, FILE *fp, caddr_t addr,
1127 size_t nbytes, int width, int quiet, int forceraw)
1128 {
1129 /*
1130 * If the byte stream is a series of printable characters, followed by
1131 * a terminating nul byte, we print it out as a string. Otherwise, we
1132 * assume that it's something else and just print the bytes.
1133 */
1134 int i, j, margin = 5;
1135 char *c = (char *)addr;
1136
1137 if (nbytes == 0)
1138 return (0);
1139
1140 if (forceraw)
1141 goto raw;
1142
1143 if (dtp->dt_options[DTRACEOPT_RAWBYTES] != DTRACEOPT_UNSET)
1144 goto raw;
1145
1146 for (i = 0; i < nbytes; i++) {
1147 /*
1148 * We define a "printable character" to be one for which
1149 * isprint(3C) returns non-zero, isspace(3C) returns non-zero,
1150 * or a character which is either backspace or the bell.
1151 * Backspace and the bell are regrettably special because
1152 * they fail the first two tests -- and yet they are entirely
1153 * printable. These are the only two control characters that
1154 * have meaning for the terminal and for which isprint(3C) and
1155 * isspace(3C) return 0.
1156 */
1157 if (isprint(c[i]) || isspace(c[i]) ||
1158 c[i] == '\b' || c[i] == '\a')
1159 continue;
1160
1161 if (c[i] == '\0' && i > 0) {
1162 /*
1163 * This looks like it might be a string. Before we
1164 * assume that it is indeed a string, check the
1165 * remainder of the byte range; if it contains
1166 * additional non-nul characters, we'll assume that
1167 * it's a binary stream that just happens to look like
1168 * a string, and we'll print out the individual bytes.
1169 */
1170 for (j = i + 1; j < nbytes; j++) {
1171 if (c[j] != '\0')
1172 break;
1173 }
1174
1175 if (j != nbytes)
1176 break;
1177
1178 if (quiet) {
1179 return (dt_printf(dtp, fp, "%s", c));
1180 } else {
1181 return (dt_printf(dtp, fp, " %s%*s",
1182 width < 0 ? " " : "", width, c));
1183 }
1184 }
1185
1186 break;
1187 }
1188
1189 if (i == nbytes) {
1190 /*
1191 * The byte range is all printable characters, but there is
1192 * no trailing nul byte. We'll assume that it's a string and
1193 * print it as such.
1194 */
1195 char *s = alloca(nbytes + 1);
1196 bcopy(c, s, nbytes);
1197 s[nbytes] = '\0';
1198 return (dt_printf(dtp, fp, " %-*s", width, s));
1199 }
1200
1201 raw:
1202 if (dt_printf(dtp, fp, "\n%*s ", margin, "") < 0)
1203 return (-1);
1204
1205 for (i = 0; i < 16; i++)
1206 if (dt_printf(dtp, fp, " %c", "0123456789abcdef"[i]) < 0)
1207 return (-1);
1208
1209 if (dt_printf(dtp, fp, " 0123456789abcdef\n") < 0)
1210 return (-1);
1211
1212
1213 for (i = 0; i < nbytes; i += 16) {
1214 if (dt_printf(dtp, fp, "%*s%5x:", margin, "", i) < 0)
1215 return (-1);
1216
1217 for (j = i; j < i + 16 && j < nbytes; j++) {
1218 if (dt_printf(dtp, fp, " %02x", (uchar_t)c[j]) < 0)
1219 return (-1);
1220 }
1221
1222 while (j++ % 16) {
1223 if (dt_printf(dtp, fp, "   ") < 0)
1224 return (-1);
1225 }
1226
1227 if (dt_printf(dtp, fp, " ") < 0)
1228 return (-1);
1229
1230 for (j = i; j < i + 16 && j < nbytes; j++) {
1231 if (dt_printf(dtp, fp, "%c",
1232 c[j] < ' ' || c[j] > '~' ? '.' : c[j]) < 0)
1233 return (-1);
1234 }
1235
1236 if (dt_printf(dtp, fp, "\n") < 0)
1237 return (-1);
1238 }
1239
1240 return (0);
1241 }
1242
1243 int
1244 dt_print_stack(dtrace_hdl_t *dtp, FILE *fp, const char *format,
1245 caddr_t addr, int depth, int size)
1246 {
1247 dtrace_syminfo_t dts;
1248 GElf_Sym sym;
1249 int i, indent;
1250 char c[PATH_MAX * 2];
1251 uint64_t pc;
1252
1253 if (dt_printf(dtp, fp, "\n") < 0)
1254 return (-1);
1255
1256 if (format == NULL)
1257 format = "%s";
1258
1259 if (dtp->dt_options[DTRACEOPT_STACKINDENT] != DTRACEOPT_UNSET)
1260 indent = (int)dtp->dt_options[DTRACEOPT_STACKINDENT];
1261 else
1262 indent = _dtrace_stkindent;
1263
1264 for (i = 0; i < depth; i++) {
1265 switch (size) {
1266 case sizeof (uint32_t):
1267 /* LINTED - alignment */
1268 pc = *((uint32_t *)addr);
1269 break;
1270
1271 case sizeof (uint64_t):
1272 /* LINTED - alignment */
1273 pc = *((uint64_t *)addr);
1274 break;
1275
1276 default:
1277 return (dt_set_errno(dtp, EDT_BADSTACKPC));
1278 }
1279
1280 if (pc == 0)
1281 break;
1282
1283 addr += size;
1284
1285 if (dt_printf(dtp, fp, "%*s", indent, "") < 0)
1286 return (-1);
1287
1288 if (dtrace_lookup_by_addr(dtp, pc, &sym, &dts) == 0) {
1289 if (pc > sym.st_value) {
1290 (void) snprintf(c, sizeof (c), "%s`%s+0x%llx",
1291 dts.dts_object, dts.dts_name,
1292 pc - sym.st_value);
1293 } else {
1294 (void) snprintf(c, sizeof (c), "%s`%s",
1295 dts.dts_object, dts.dts_name);
1296 }
1297 } else {
1298 /*
1299 * We'll repeat the lookup, but this time we'll specify
1300 * a NULL GElf_Sym -- indicating that we're only
1301 * interested in the containing module.
1302 */
1303 if (dtrace_lookup_by_addr(dtp, pc, NULL, &dts) == 0) {
1304 (void) snprintf(c, sizeof (c), "%s`0x%llx",
1305 dts.dts_object, pc);
1306 } else {
1307 (void) snprintf(c, sizeof (c), "0x%llx", pc);
1308 }
1309 }
1310
1311 if (dt_printf(dtp, fp, format, c) < 0)
1312 return (-1);
1313
1314 if (dt_printf(dtp, fp, "\n") < 0)
1315 return (-1);
1316 }
1317
1318 return (0);
1319 }
1320
1321 int
1322 dt_print_ustack(dtrace_hdl_t *dtp, FILE *fp, const char *format,
1323 caddr_t addr, uint64_t arg)
1324 {
1325 /* LINTED - alignment */
1326 uint64_t *pc = (uint64_t *)addr;
1327 uint32_t depth = DTRACE_USTACK_NFRAMES(arg);
1328 uint32_t strsize = DTRACE_USTACK_STRSIZE(arg);
1329 const char *strbase = addr + (depth + 1) * sizeof (uint64_t);
1330 const char *str = strsize ? strbase : NULL;
1331 int err = 0;
1332
1333 char name[PATH_MAX], objname[PATH_MAX], c[PATH_MAX * 2];
1334 struct ps_prochandle *P;
1335 GElf_Sym sym;
1336 int i, indent;
1337 pid_t pid;
1338
1339 if (depth == 0)
1340 return (0);
1341
1342 pid = (pid_t)*pc++;
1343
1344 if (dt_printf(dtp, fp, "\n") < 0)
1345 return (-1);
1346
1347 if (format == NULL)
1348 format = "%s";
1349
1350 if (dtp->dt_options[DTRACEOPT_STACKINDENT] != DTRACEOPT_UNSET)
1351 indent = (int)dtp->dt_options[DTRACEOPT_STACKINDENT];
1352 else
1353 indent = _dtrace_stkindent;
1354
1355 /*
1356 * Ultimately, we need to add an entry point in the library vector for
1357 * determining <symbol, offset> from <pid, address>. For now, if
1358 * this is a vector open, we just print the raw address or string.
1359 */
1360 if (dtp->dt_vector == NULL)
1361 P = dt_proc_grab(dtp, pid, PGRAB_RDONLY | PGRAB_FORCE, 0);
1362 else
1363 P = NULL;
1364
1365 if (P != NULL)
1366 dt_proc_lock(dtp, P); /* lock handle while we perform lookups */
1367
1368 for (i = 0; i < depth && pc[i] != 0; i++) {
1369 const prmap_t *map;
1370
1371 if ((err = dt_printf(dtp, fp, "%*s", indent, "")) < 0)
1372 break;
1373
1374 if (P != NULL && Plookup_by_addr(P, pc[i],
1375 name, sizeof (name), &sym) == 0) {
1376 (void) Pobjname(P, pc[i], objname, sizeof (objname));
1377
1378 if (pc[i] > sym.st_value) {
1379 (void) snprintf(c, sizeof (c),
1380 "%s`%s+0x%llx", dt_basename(objname), name,
1381 (u_longlong_t)(pc[i] - sym.st_value));
1382 } else {
1383 (void) snprintf(c, sizeof (c),
1384 "%s`%s", dt_basename(objname), name);
1385 }
1386 } else if (str != NULL && str[0] != '\0' && str[0] != '@' &&
1387 (P == NULL || (map = Paddr_to_map(P, pc[i])) == NULL ||
1388 map->pr_mflags & MA_WRITE)) {
1389 /*
1390 * If the current string pointer in the string table
1391 * does not point to an empty string _and_ the program
1392 * counter falls in a writable region, we'll use the
1393 * string from the string table instead of the raw
1394 * address. This last condition is necessary because
1395 * some (broken) ustack helpers will return a string
1396 * even for a program counter that they can't
1397 * identify. If we have a string for a program
1398 * counter that falls in a segment that isn't
1399 * writable, we assume that we have fallen into this
1400 * case and we refuse to use the string. Finally,
1401 * note that if we could not grab the process (e.g.,
1402 * because it exited), the information from the helper
1403 * is better than nothing.
1404 */
1405 (void) snprintf(c, sizeof (c), "%s", str);
1406 } else {
1407 if (P != NULL && Pobjname(P, pc[i], objname,
1408 sizeof (objname)) != NULL) {
1409 (void) snprintf(c, sizeof (c), "%s`0x%llx",
1410 dt_basename(objname), (u_longlong_t)pc[i]);
1411 } else {
1412 (void) snprintf(c, sizeof (c), "0x%llx",
1413 (u_longlong_t)pc[i]);
1414 }
1415 }
1416
1417 if ((err = dt_printf(dtp, fp, format, c)) < 0)
1418 break;
1419
1420 if ((err = dt_printf(dtp, fp, "\n")) < 0)
1421 break;
1422
1423 if (str != NULL && str[0] == '@') {
1424 /*
1425 * If the first character of the string is an "at" sign,
1426 * then the string is inferred to be an annotation --
1427 * and it is printed out beneath the frame and offset
1428 * with brackets.
1429 */
1430 if ((err = dt_printf(dtp, fp, "%*s", indent, "")) < 0)
1431 break;
1432
1433 (void) snprintf(c, sizeof (c), " [ %s ]", &str[1]);
1434
1435 if ((err = dt_printf(dtp, fp, format, c)) < 0)
1436 break;
1437
1438 if ((err = dt_printf(dtp, fp, "\n")) < 0)
1439 break;
1440 }
1441
1442 if (str != NULL) {
1443 str += strlen(str) + 1;
1444 if (str - strbase >= strsize)
1445 str = NULL;
1446 }
1447 }
1448
1449 if (P != NULL) {
1450 dt_proc_unlock(dtp, P);
1451 dt_proc_release(dtp, P);
1452 }
1453
1454 return (err);
1455 }
1456
1457 static int
1458 dt_print_usym(dtrace_hdl_t *dtp, FILE *fp, caddr_t addr, dtrace_actkind_t act)
1459 {
1460 /* LINTED - alignment */
1461 uint64_t pid = ((uint64_t *)addr)[0];
1462 /* LINTED - alignment */
1463 uint64_t pc = ((uint64_t *)addr)[1];
1464 const char *format = " %-50s";
1465 char *s;
1466 int n, len = 256;
1467
1468 if (act == DTRACEACT_USYM && dtp->dt_vector == NULL) {
1469 struct ps_prochandle *P;
1470
1471 if ((P = dt_proc_grab(dtp, pid,
1472 PGRAB_RDONLY | PGRAB_FORCE, 0)) != NULL) {
1473 GElf_Sym sym;
1474
1475 dt_proc_lock(dtp, P);
1476
1477 if (Plookup_by_addr(P, pc, NULL, 0, &sym) == 0)
1478 pc = sym.st_value;
1479
1480 dt_proc_unlock(dtp, P);
1481 dt_proc_release(dtp, P);
1482 }
1483 }
1484
1485 do {
1486 n = len;
1487 s = alloca(n);
1488 } while ((len = dtrace_uaddr2str(dtp, pid, pc, s, n)) > n);
1489
1490 return (dt_printf(dtp, fp, format, s));
1491 }
1492
1493 int
1494 dt_print_umod(dtrace_hdl_t *dtp, FILE *fp, const char *format, caddr_t addr)
1495 {
1496 /* LINTED - alignment */
1497 uint64_t pid = ((uint64_t *)addr)[0];
1498 /* LINTED - alignment */
1499 uint64_t pc = ((uint64_t *)addr)[1];
1500 int err = 0;
1501
1502 char objname[PATH_MAX], c[PATH_MAX * 2];
1503 struct ps_prochandle *P;
1504
1505 if (format == NULL)
1506 format = " %-50s";
1507
1508 /*
1509 * See the comment in dt_print_ustack() for the rationale for
1510 * printing raw addresses in the vectored case.
1511 */
1512 if (dtp->dt_vector == NULL)
1513 P = dt_proc_grab(dtp, pid, PGRAB_RDONLY | PGRAB_FORCE, 0);
1514 else
1515 P = NULL;
1516
1517 if (P != NULL)
1518 dt_proc_lock(dtp, P); /* lock handle while we perform lookups */
1519
1520 if (P != NULL && Pobjname(P, pc, objname, sizeof (objname)) != NULL) {
1521 (void) snprintf(c, sizeof (c), "%s", dt_basename(objname));
1522 } else {
1523 (void) snprintf(c, sizeof (c), "0x%llx", (u_longlong_t)pc);
1524 }
1525
1526 err = dt_printf(dtp, fp, format, c);
1527
1528 if (P != NULL) {
1529 dt_proc_unlock(dtp, P);
1530 dt_proc_release(dtp, P);
1531 }
1532
1533 return (err);
1534 }
1535
1536 static int
1537 dt_print_sym(dtrace_hdl_t *dtp, FILE *fp, const char *format, caddr_t addr)
1538 {
1539 /* LINTED - alignment */
1540 uint64_t pc = *((uint64_t *)addr);
1541 dtrace_syminfo_t dts;
1542 GElf_Sym sym;
1543 char c[PATH_MAX * 2];
1544
1545 if (format == NULL)
1546 format = " %-50s";
1547
1548 if (dtrace_lookup_by_addr(dtp, pc, &sym, &dts) == 0) {
1549 (void) snprintf(c, sizeof (c), "%s`%s",
1550 dts.dts_object, dts.dts_name);
1551 } else {
1552 /*
1553 * We'll repeat the lookup, but this time we'll specify a
1554 * NULL GElf_Sym -- indicating that we're only interested in
1555 * the containing module.
1556 */
1557 if (dtrace_lookup_by_addr(dtp, pc, NULL, &dts) == 0) {
1558 (void) snprintf(c, sizeof (c), "%s`0x%llx",
1559 dts.dts_object, (u_longlong_t)pc);
1560 } else {
1561 (void) snprintf(c, sizeof (c), "0x%llx",
1562 (u_longlong_t)pc);
1563 }
1564 }
1565
1566 if (dt_printf(dtp, fp, format, c) < 0)
1567 return (-1);
1568
1569 return (0);
1570 }
1571
1572 int
1573 dt_print_mod(dtrace_hdl_t *dtp, FILE *fp, const char *format, caddr_t addr)
1574 {
1575 /* LINTED - alignment */
1576 uint64_t pc = *((uint64_t *)addr);
1577 dtrace_syminfo_t dts;
1578 char c[PATH_MAX * 2];
1579
1580 if (format == NULL)
1581 format = " %-50s";
1582
1583 if (dtrace_lookup_by_addr(dtp, pc, NULL, &dts) == 0) {
1584 (void) snprintf(c, sizeof (c), "%s", dts.dts_object);
1585 } else {
1586 (void) snprintf(c, sizeof (c), "0x%llx", (u_longlong_t)pc);
1587 }
1588
1589 if (dt_printf(dtp, fp, format, c) < 0)
1590 return (-1);
1591
1592 return (0);
1593 }
1594
1595 typedef struct dt_normal {
1596 dtrace_aggvarid_t dtnd_id;
1597 uint64_t dtnd_normal;
1598 } dt_normal_t;
1599
1600 static int
1601 dt_normalize_agg(const dtrace_aggdata_t *aggdata, void *arg)
1602 {
1603 dt_normal_t *normal = arg;
1604 dtrace_aggdesc_t *agg = aggdata->dtada_desc;
1605 dtrace_aggvarid_t id = normal->dtnd_id;
1606
1607 if (agg->dtagd_nrecs == 0)
1608 return (DTRACE_AGGWALK_NEXT);
1609
1610 if (agg->dtagd_varid != id)
1611 return (DTRACE_AGGWALK_NEXT);
1612
1613 ((dtrace_aggdata_t *)aggdata)->dtada_normal = normal->dtnd_normal;
1614 return (DTRACE_AGGWALK_NORMALIZE);
1615 }
1616
1617 static int
1618 dt_normalize(dtrace_hdl_t *dtp, caddr_t base, dtrace_recdesc_t *rec)
1619 {
1620 dt_normal_t normal;
1621 caddr_t addr;
1622
1623 /*
1624 * We (should) have two records: the aggregation ID followed by the
1625 * normalization value.
1626 */
1627 addr = base + rec->dtrd_offset;
1628
1629 if (rec->dtrd_size != sizeof (dtrace_aggvarid_t))
1630 return (dt_set_errno(dtp, EDT_BADNORMAL));
1631
1632 /* LINTED - alignment */
1633 normal.dtnd_id = *((dtrace_aggvarid_t *)addr);
1634 rec++;
1635
1636 if (rec->dtrd_action != DTRACEACT_LIBACT)
1637 return (dt_set_errno(dtp, EDT_BADNORMAL));
1638
1639 if (rec->dtrd_arg != DT_ACT_NORMALIZE)
1640 return (dt_set_errno(dtp, EDT_BADNORMAL));
1641
1642 addr = base + rec->dtrd_offset;
1643
1644 switch (rec->dtrd_size) {
1645 case sizeof (uint64_t):
1646 /* LINTED - alignment */
1647 normal.dtnd_normal = *((uint64_t *)addr);
1648 break;
1649 case sizeof (uint32_t):
1650 /* LINTED - alignment */
1651 normal.dtnd_normal = *((uint32_t *)addr);
1652 break;
1653 case sizeof (uint16_t):
1654 /* LINTED - alignment */
1655 normal.dtnd_normal = *((uint16_t *)addr);
1656 break;
1657 case sizeof (uint8_t):
1658 normal.dtnd_normal = *((uint8_t *)addr);
1659 break;
1660 default:
1661 return (dt_set_errno(dtp, EDT_BADNORMAL));
1662 }
1663
1664 (void) dtrace_aggregate_walk(dtp, dt_normalize_agg, &normal);
1665
1666 return (0);
1667 }
1668
1669 static int
1670 dt_denormalize_agg(const dtrace_aggdata_t *aggdata, void *arg)
1671 {
1672 dtrace_aggdesc_t *agg = aggdata->dtada_desc;
1673 dtrace_aggvarid_t id = *((dtrace_aggvarid_t *)arg);
1674
1675 if (agg->dtagd_nrecs == 0)
1676 return (DTRACE_AGGWALK_NEXT);
1677
1678 if (agg->dtagd_varid != id)
1679 return (DTRACE_AGGWALK_NEXT);
1680
1681 return (DTRACE_AGGWALK_DENORMALIZE);
1682 }
1683
1684 static int
1685 dt_clear_agg(const dtrace_aggdata_t *aggdata, void *arg)
1686 {
1687 dtrace_aggdesc_t *agg = aggdata->dtada_desc;
1688 dtrace_aggvarid_t id = *((dtrace_aggvarid_t *)arg);
1689
1690 if (agg->dtagd_nrecs == 0)
1691 return (DTRACE_AGGWALK_NEXT);
1692
1693 if (agg->dtagd_varid != id)
1694 return (DTRACE_AGGWALK_NEXT);
1695
1696 return (DTRACE_AGGWALK_CLEAR);
1697 }
1698
1699 typedef struct dt_trunc {
1700 dtrace_aggvarid_t dttd_id;
1701 uint64_t dttd_remaining;
1702 } dt_trunc_t;
1703
1704 static int
1705 dt_trunc_agg(const dtrace_aggdata_t *aggdata, void *arg)
1706 {
1707 dt_trunc_t *trunc = arg;
1708 dtrace_aggdesc_t *agg = aggdata->dtada_desc;
1709 dtrace_aggvarid_t id = trunc->dttd_id;
1710
1711 if (agg->dtagd_nrecs == 0)
1712 return (DTRACE_AGGWALK_NEXT);
1713
1714 if (agg->dtagd_varid != id)
1715 return (DTRACE_AGGWALK_NEXT);
1716
1717 if (trunc->dttd_remaining == 0)
1718 return (DTRACE_AGGWALK_REMOVE);
1719
1720 trunc->dttd_remaining--;
1721 return (DTRACE_AGGWALK_NEXT);
1722 }
1723
1724 static int
1725 dt_trunc(dtrace_hdl_t *dtp, caddr_t base, dtrace_recdesc_t *rec)
1726 {
1727 dt_trunc_t trunc;
1728 caddr_t addr;
1729 int64_t remaining;
1730 int (*func)(dtrace_hdl_t *, dtrace_aggregate_f *, void *);
1731
1732 /*
1733 * We (should) have two records: the aggregation ID followed by the
1734 * number of aggregation entries after which the aggregation is to be
1735 * truncated.
1736 */
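/*
 * A positive entry count keeps that many of the highest-valued entries
 * (the walk below runs in descending value order); a negative count keeps
 * the lowest-valued entries instead, which is why the walk function is
 * chosen from the sign of the value.
 */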
1737 addr = base + rec->dtrd_offset;
1738
1739 if (rec->dtrd_size != sizeof (dtrace_aggvarid_t))
1740 return (dt_set_errno(dtp, EDT_BADTRUNC));
1741
1742 /* LINTED - alignment */
1743 trunc.dttd_id = *((dtrace_aggvarid_t *)addr);
1744 rec++;
1745
1746 if (rec->dtrd_action != DTRACEACT_LIBACT)
1747 return (dt_set_errno(dtp, EDT_BADTRUNC));
1748
1749 if (rec->dtrd_arg != DT_ACT_TRUNC)
1750 return (dt_set_errno(dtp, EDT_BADTRUNC));
1751
1752 addr = base + rec->dtrd_offset;
1753
1754 switch (rec->dtrd_size) {
1755 case sizeof (uint64_t):
1756 /* LINTED - alignment */
1757 remaining = *((int64_t *)addr);
1758 break;
1759 case sizeof (uint32_t):
1760 /* LINTED - alignment */
1761 remaining = *((int32_t *)addr);
1762 break;
1763 case sizeof (uint16_t):
1764 /* LINTED - alignment */
1765 remaining = *((int16_t *)addr);
1766 break;
1767 case sizeof (uint8_t):
1768 remaining = *((int8_t *)addr);
1769 break;
1770 default:
1771 return (dt_set_errno(dtp, EDT_BADNORMAL));
1772 }
1773
1774 if (remaining < 0) {
1775 func = dtrace_aggregate_walk_valsorted;
1776 remaining = -remaining;
1777 } else {
1778 func = dtrace_aggregate_walk_valrevsorted;
1779 }
1780
1781 assert(remaining >= 0);
1782 trunc.dttd_remaining = remaining;
1783
1784 (void) func(dtp, dt_trunc_agg, &trunc);
1785
1786 return (0);
1787 }
1788
1789 static int
1790 dt_print_datum(dtrace_hdl_t *dtp, FILE *fp, dtrace_recdesc_t *rec,
1791 caddr_t addr, size_t size, const dtrace_aggdata_t *aggdata,
1792 uint64_t normal, dt_print_aggdata_t *pd)
1793 {
1794 int err, width;
1795 dtrace_actkind_t act = rec->dtrd_action;
1796 boolean_t packed = pd->dtpa_agghist || pd->dtpa_aggpack;
1797 dtrace_aggdesc_t *agg = aggdata->dtada_desc;
1798
1799 static struct {
1800 size_t size;
1801 int width;
1802 int packedwidth;
1803 } *fmt, fmttab[] = {
1804 { sizeof (uint8_t), 3, 3 },
1805 { sizeof (uint16_t), 5, 5 },
1806 { sizeof (uint32_t), 8, 8 },
1807 { sizeof (uint64_t), 16, 16 },
1808 { 0, -50, 16 }
1809 };
1810
1811 if (packed && pd->dtpa_agghisthdr != agg->dtagd_varid) {
1812 dtrace_recdesc_t *r;
1813
1814 width = 0;
1815
1816 /*
1817 * To print our quantization header for either an agghist or
1818 * aggpack aggregation, we need to iterate through all of our
1819 * records to determine their width.
1820 */
1821 for (r = rec; !DTRACEACT_ISAGG(r->dtrd_action); r++) {
1822 for (fmt = fmttab; fmt->size &&
1823 fmt->size != r->dtrd_size; fmt++)
1824 continue;
1825
1826 width += fmt->packedwidth + 1;
1827 }
1828
1829 if (pd->dtpa_agghist) {
1830 if (dt_print_quanthdr(dtp, fp, width) < 0)
1831 return (-1);
1832 } else {
1833 if (dt_print_quanthdr_packed(dtp, fp,
1834 width, aggdata, r->dtrd_action) < 0)
1835 return (-1);
1836 }
1837
1838 pd->dtpa_agghisthdr = agg->dtagd_varid;
1839 }
1840
1841 if (pd->dtpa_agghist && DTRACEACT_ISAGG(act)) {
1842 char positives = aggdata->dtada_flags & DTRACE_A_HASPOSITIVES;
1843 char negatives = aggdata->dtada_flags & DTRACE_A_HASNEGATIVES;
1844 int64_t val;
1845
1846 assert(act == DTRACEAGG_SUM || act == DTRACEAGG_COUNT);
1847 val = (long long)*((uint64_t *)addr);
1848
1849 if (dt_printf(dtp, fp, " ") < 0)
1850 return (-1);
1851
1852 return (dt_print_quantline(dtp, fp, val, normal,
1853 aggdata->dtada_total, positives, negatives));
1854 }
1855
1856 if (pd->dtpa_aggpack && DTRACEACT_ISAGG(act)) {
1857 switch (act) {
1858 case DTRACEAGG_QUANTIZE:
1859 return (dt_print_quantize_packed(dtp,
1860 fp, addr, size, aggdata));
1861 case DTRACEAGG_LQUANTIZE:
1862 return (dt_print_lquantize_packed(dtp,
1863 fp, addr, size, aggdata));
1864 default:
1865 break;
1866 }
1867 }
1868
1869 switch (act) {
1870 case DTRACEACT_STACK:
1871 return (dt_print_stack(dtp, fp, NULL, addr,
1872 rec->dtrd_arg, rec->dtrd_size / rec->dtrd_arg));
1873
1874 case DTRACEACT_USTACK:
1875 case DTRACEACT_JSTACK:
1876 return (dt_print_ustack(dtp, fp, NULL, addr, rec->dtrd_arg));
1877
1878 case DTRACEACT_USYM:
1879 case DTRACEACT_UADDR:
1880 return (dt_print_usym(dtp, fp, addr, act));
1881
1882 case DTRACEACT_UMOD:
1883 return (dt_print_umod(dtp, fp, NULL, addr));
1884
1885 case DTRACEACT_SYM:
1886 return (dt_print_sym(dtp, fp, NULL, addr));
1887
1888 case DTRACEACT_MOD:
1889 return (dt_print_mod(dtp, fp, NULL, addr));
1890
1891 case DTRACEAGG_QUANTIZE:
1892 return (dt_print_quantize(dtp, fp, addr, size, normal));
1893
1894 case DTRACEAGG_LQUANTIZE:
1895 return (dt_print_lquantize(dtp, fp, addr, size, normal));
1896
1897 case DTRACEAGG_LLQUANTIZE:
1898 return (dt_print_llquantize(dtp, fp, addr, size, normal));
1899
1900 case DTRACEAGG_AVG:
1901 return (dt_print_average(dtp, fp, addr, size, normal));
1902
1903 case DTRACEAGG_STDDEV:
1904 return (dt_print_stddev(dtp, fp, addr, size, normal));
1905
1906 default:
1907 break;
1908 }
1909
1910 for (fmt = fmttab; fmt->size && fmt->size != size; fmt++)
1911 continue;
1912
1913 width = packed ? fmt->packedwidth : fmt->width;
1914
1915 switch (size) {
1916 case sizeof (uint64_t):
1917 err = dt_printf(dtp, fp, " %*lld", width,
1918 /* LINTED - alignment */
1919 (long long)*((uint64_t *)addr) / normal);
1920 break;
1921 case sizeof (uint32_t):
1922 /* LINTED - alignment */
1923 err = dt_printf(dtp, fp, " %*d", width, *((uint32_t *)addr) /
1924 (uint32_t)normal);
1925 break;
1926 case sizeof (uint16_t):
1927 /* LINTED - alignment */
1928 err = dt_printf(dtp, fp, " %*d", width, *((uint16_t *)addr) /
1929 (uint32_t)normal);
1930 break;
1931 case sizeof (uint8_t):
1932 err = dt_printf(dtp, fp, " %*d", width, *((uint8_t *)addr) /
1933 (uint32_t)normal);
1934 break;
1935 default:
1936 err = dt_print_bytes(dtp, fp, addr, size, width, 0, 0);
1937 break;
1938 }
1939
1940 return (err);
1941 }
1942
1943 int
1944 dt_print_aggs(const dtrace_aggdata_t **aggsdata, int naggvars, void *arg)
1945 {
1946 int i, aggact = 0;
1947 dt_print_aggdata_t *pd = arg;
1948 const dtrace_aggdata_t *aggdata = aggsdata[0];
1949 dtrace_aggdesc_t *agg = aggdata->dtada_desc;
1950 FILE *fp = pd->dtpa_fp;
1951 dtrace_hdl_t *dtp = pd->dtpa_dtp;
1952 dtrace_recdesc_t *rec;
1953 dtrace_actkind_t act;
1954 caddr_t addr;
1955 size_t size;
1956
1957 pd->dtpa_agghist = (aggdata->dtada_flags & DTRACE_A_TOTAL);
1958 pd->dtpa_aggpack = (aggdata->dtada_flags & DTRACE_A_MINMAXBIN);
1959
1960 /*
1961 * Iterate over each record description in the key, printing the traced
1962 * data, skipping the first datum (the tuple member created by the
1963 * compiler).
1964 */
1965 for (i = 1; i < agg->dtagd_nrecs; i++) {
1966 rec = &agg->dtagd_rec[i];
1967 act = rec->dtrd_action;
1968 addr = aggdata->dtada_data + rec->dtrd_offset;
1969 size = rec->dtrd_size;
1970
1971 if (DTRACEACT_ISAGG(act)) {
1972 aggact = i;
1973 break;
1974 }
1975
1976 if (dt_print_datum(dtp, fp, rec, addr,
1977 size, aggdata, 1, pd) < 0)
1978 return (-1);
1979
1980 if (dt_buffered_flush(dtp, NULL, rec, aggdata,
1981 DTRACE_BUFDATA_AGGKEY) < 0)
1982 return (-1);
1983 }
1984
1985 assert(aggact != 0);
1986
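	/*
	 * Now print the aggregating value(s).  For a single aggregation the
	 * value lives in aggsdata[0]; for a joined printa() the values start
	 * at aggsdata[1], the key having come from aggsdata[0] above.
	 */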
1987 for (i = (naggvars == 1 ? 0 : 1); i < naggvars; i++) {
1988 uint64_t normal;
1989
1990 aggdata = aggsdata[i];
1991 agg = aggdata->dtada_desc;
1992 rec = &agg->dtagd_rec[aggact];
1993 act = rec->dtrd_action;
1994 addr = aggdata->dtada_data + rec->dtrd_offset;
1995 size = rec->dtrd_size;
1996
1997 assert(DTRACEACT_ISAGG(act));
1998 normal = aggdata->dtada_normal;
1999
2000 if (dt_print_datum(dtp, fp, rec, addr,
2001 size, aggdata, normal, pd) < 0)
2002 return (-1);
2003
2004 if (dt_buffered_flush(dtp, NULL, rec, aggdata,
2005 DTRACE_BUFDATA_AGGVAL) < 0)
2006 return (-1);
2007
2008 if (!pd->dtpa_allunprint)
2009 agg->dtagd_flags |= DTRACE_AGD_PRINTED;
2010 }
2011
2012 if (!pd->dtpa_agghist && !pd->dtpa_aggpack) {
2013 if (dt_printf(dtp, fp, "\n") < 0)
2014 return (-1);
2015 }
2016
2017 if (dt_buffered_flush(dtp, NULL, NULL, aggdata,
2018 DTRACE_BUFDATA_AGGFORMAT | DTRACE_BUFDATA_AGGLAST) < 0)
2019 return (-1);
2020
2021 return (0);
2022 }
2023
2024 int
2025 dt_print_agg(const dtrace_aggdata_t *aggdata, void *arg)
2026 {
2027 dt_print_aggdata_t *pd = arg;
2028 dtrace_aggdesc_t *agg = aggdata->dtada_desc;
2029 dtrace_aggvarid_t aggvarid = pd->dtpa_id;
2030
2031 if (pd->dtpa_allunprint) {
2032 if (agg->dtagd_flags & DTRACE_AGD_PRINTED)
2033 return (0);
2034 } else {
2035 /*
2036 * If we're not printing all unprinted aggregations, then the
2037 * aggregation variable ID denotes a specific aggregation
2038 * variable that we should print -- skip any other aggregations
2039 * that we encounter.
2040 */
2041 if (agg->dtagd_nrecs == 0)
2042 return (0);
2043
2044 if (aggvarid != agg->dtagd_varid)
2045 return (0);
2046 }
2047
2048 return (dt_print_aggs(&aggdata, 1, arg));
2049 }
2050
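/*
 * Process a setopt() action from a D program (e.g. setopt("quiet") or
 * setopt("switchrate", "10hz")):  apply the option, invoke any registered
 * setopt handler on success, and route failures through the library error
 * handler.
 */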
2051 int
2052 dt_setopt(dtrace_hdl_t *dtp, const dtrace_probedata_t *data,
2053 const char *option, const char *value)
2054 {
2055 int len, rval;
2056 char *msg;
2057 const char *errstr;
2058 dtrace_setoptdata_t optdata;
2059
2060 bzero(&optdata, sizeof (optdata));
2061 (void) dtrace_getopt(dtp, option, &optdata.dtsda_oldval);
2062
2063 if (dtrace_setopt(dtp, option, value) == 0) {
2064 (void) dtrace_getopt(dtp, option, &optdata.dtsda_newval);
2065 optdata.dtsda_probe = data;
2066 optdata.dtsda_option = option;
2067 optdata.dtsda_handle = dtp;
2068
2069 if ((rval = dt_handle_setopt(dtp, &optdata)) != 0)
2070 return (rval);
2071
2072 return (0);
2073 }
2074
2075 errstr = dtrace_errmsg(dtp, dtrace_errno(dtp));
2076 len = strlen(option) + strlen(value) + strlen(errstr) + 80;
2077 msg = alloca(len);
2078
2079 (void) snprintf(msg, len, "couldn't set option \"%s\" to \"%s\": %s\n",
2080 option, value, errstr);
2081
2082 if ((rval = dt_handle_liberr(dtp, data, msg)) == 0)
2083 return (0);
2084
2085 return (rval);
2086 }
2087
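/*
 * Consume the principal buffer for a single CPU:  walk each enabled probe's
 * records starting at dtbd_oldest, dispatching them through the supplied
 * probe (efunc) and record (rfunc) callbacks and handling library actions
 * (clear(), normalize(), trunc(), setopt(), printa(), ...) along the way.
 * If just_one is set, only a single EPID's worth of records is consumed.
 */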
2088 static int
2089 dt_consume_cpu(dtrace_hdl_t *dtp, FILE *fp, int cpu,
2090 dtrace_bufdesc_t *buf, boolean_t just_one,
2091 dtrace_consume_probe_f *efunc, dtrace_consume_rec_f *rfunc, void *arg)
2092 {
2093 dtrace_epid_t id;
2094 size_t offs;
2095 int flow = (dtp->dt_options[DTRACEOPT_FLOWINDENT] != DTRACEOPT_UNSET);
2096 int quiet = (dtp->dt_options[DTRACEOPT_QUIET] != DTRACEOPT_UNSET);
2097 int rval, i, n;
2098 uint64_t tracememsize = 0;
2099 dtrace_probedata_t data;
2100 uint64_t drops;
2101
2102 bzero(&data, sizeof (data));
2103 data.dtpda_handle = dtp;
2104 data.dtpda_cpu = cpu;
2105 data.dtpda_flow = dtp->dt_flow;
2106 data.dtpda_indent = dtp->dt_indent;
2107 data.dtpda_prefix = dtp->dt_prefix;
2108
2109 for (offs = buf->dtbd_oldest; offs < buf->dtbd_size; ) {
2110 dtrace_eprobedesc_t *epd;
2111
2112 /*
2113 * We're guaranteed to have an ID.
2114 */
2115 id = *(uint32_t *)((uintptr_t)buf->dtbd_data + offs);
2116
2117 if (id == DTRACE_EPIDNONE) {
2118 /*
2119 * This is filler to assure proper alignment of the
2120 * next record; we simply ignore it.
2121 */
2122 offs += sizeof (id);
2123 continue;
2124 }
2125
2126 if ((rval = dt_epid_lookup(dtp, id, &data.dtpda_edesc,
2127 &data.dtpda_pdesc)) != 0)
2128 return (rval);
2129
2130 epd = data.dtpda_edesc;
2131 data.dtpda_data = buf->dtbd_data + offs;
2132
2133 if (data.dtpda_edesc->dtepd_uarg != DT_ECB_DEFAULT) {
2134 rval = dt_handle(dtp, &data);
2135
2136 if (rval == DTRACE_CONSUME_NEXT)
2137 goto nextepid;
2138
2139 if (rval == DTRACE_CONSUME_ERROR)
2140 return (-1);
2141 }
2142
2143 if (flow)
2144 (void) dt_flowindent(dtp, &data, dtp->dt_last_epid,
2145 buf, offs);
2146
2147 rval = (*efunc)(&data, arg);
2148
2149 if (flow) {
2150 if (data.dtpda_flow == DTRACEFLOW_ENTRY)
2151 data.dtpda_indent += 2;
2152 }
2153
2154 if (rval == DTRACE_CONSUME_NEXT)
2155 goto nextepid;
2156
2157 if (rval == DTRACE_CONSUME_ABORT)
2158 return (dt_set_errno(dtp, EDT_DIRABORT));
2159
2160 if (rval != DTRACE_CONSUME_THIS)
2161 return (dt_set_errno(dtp, EDT_BADRVAL));
2162
2163 for (i = 0; i < epd->dtepd_nrecs; i++) {
2164 caddr_t addr;
2165 dtrace_recdesc_t *rec = &epd->dtepd_rec[i];
2166 dtrace_actkind_t act = rec->dtrd_action;
2167
2168 data.dtpda_data = buf->dtbd_data + offs +
2169 rec->dtrd_offset;
2170 addr = data.dtpda_data;
2171
2172 if (act == DTRACEACT_LIBACT) {
2173 uint64_t arg = rec->dtrd_arg;
2174 dtrace_aggvarid_t id;
2175
2176 switch (arg) {
2177 case DT_ACT_CLEAR:
2178 /* LINTED - alignment */
2179 id = *((dtrace_aggvarid_t *)addr);
2180 (void) dtrace_aggregate_walk(dtp,
2181 dt_clear_agg, &id);
2182 continue;
2183
2184 case DT_ACT_DENORMALIZE:
2185 /* LINTED - alignment */
2186 id = *((dtrace_aggvarid_t *)addr);
2187 (void) dtrace_aggregate_walk(dtp,
2188 dt_denormalize_agg, &id);
2189 continue;
2190
2191 case DT_ACT_FTRUNCATE:
2192 if (fp == NULL)
2193 continue;
2194
2195 (void) fflush(fp);
2196 (void) ftruncate(fileno(fp), 0);
2197 (void) fseeko(fp, 0, SEEK_SET);
2198 continue;
2199
2200 case DT_ACT_NORMALIZE:
2201 if (i == epd->dtepd_nrecs - 1)
2202 return (dt_set_errno(dtp,
2203 EDT_BADNORMAL));
2204
2205 if (dt_normalize(dtp,
2206 buf->dtbd_data + offs, rec) != 0)
2207 return (-1);
2208
2209 i++;
2210 continue;
2211
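				/*
				 * setopt() is encoded as a pair of records:
				 * this record holds the option name; the next
				 * holds its value, or acts as a placeholder
				 * when no value was given, in which case the
				 * option is set to "1".
				 */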
2212 case DT_ACT_SETOPT: {
2213 uint64_t *opts = dtp->dt_options;
2214 dtrace_recdesc_t *valrec;
2215 uint32_t valsize;
2216 caddr_t val;
2217 int rv;
2218
2219 if (i == epd->dtepd_nrecs - 1) {
2220 return (dt_set_errno(dtp,
2221 EDT_BADSETOPT));
2222 }
2223
2224 valrec = &epd->dtepd_rec[++i];
2225 valsize = valrec->dtrd_size;
2226
2227 if (valrec->dtrd_action != act ||
2228 valrec->dtrd_arg != arg) {
2229 return (dt_set_errno(dtp,
2230 EDT_BADSETOPT));
2231 }
2232
2233 if (valsize > sizeof (uint64_t)) {
2234 val = buf->dtbd_data + offs +
2235 valrec->dtrd_offset;
2236 } else {
2237 val = "1";
2238 }
2239
2240 rv = dt_setopt(dtp, &data, addr, val);
2241
2242 if (rv != 0)
2243 return (-1);
2244
2245 flow = (opts[DTRACEOPT_FLOWINDENT] !=
2246 DTRACEOPT_UNSET);
2247 quiet = (opts[DTRACEOPT_QUIET] !=
2248 DTRACEOPT_UNSET);
2249
2250 continue;
2251 }
2252
2253 case DT_ACT_TRUNC:
2254 if (i == epd->dtepd_nrecs - 1)
2255 return (dt_set_errno(dtp,
2256 EDT_BADTRUNC));
2257
2258 if (dt_trunc(dtp,
2259 buf->dtbd_data + offs, rec) != 0)
2260 return (-1);
2261
2262 i++;
2263 continue;
2264
2265 default:
2266 continue;
2267 }
2268 }
2269
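			/*
			 * A dynamically-sized tracemem() is preceded by a
			 * DTRACEACT_TRACEMEM_DYNSIZE record; remember the
			 * requested size so it can bound the byte dump for
			 * the tracemem() record that follows.
			 */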
2270 if (act == DTRACEACT_TRACEMEM_DYNSIZE &&
2271 rec->dtrd_size == sizeof (uint64_t)) {
2272 /* LINTED - alignment */
2273 tracememsize = *((unsigned long long *)addr);
2274 continue;
2275 }
2276
2277 rval = (*rfunc)(&data, rec, arg);
2278
2279 if (rval == DTRACE_CONSUME_NEXT)
2280 continue;
2281
2282 if (rval == DTRACE_CONSUME_ABORT)
2283 return (dt_set_errno(dtp, EDT_DIRABORT));
2284
2285 if (rval != DTRACE_CONSUME_THIS)
2286 return (dt_set_errno(dtp, EDT_BADRVAL));
2287
2288 if (act == DTRACEACT_STACK) {
2289 int depth = rec->dtrd_arg;
2290
2291 if (dt_print_stack(dtp, fp, NULL, addr, depth,
2292 rec->dtrd_size / depth) < 0)
2293 return (-1);
2294 goto nextrec;
2295 }
2296
2297 if (act == DTRACEACT_USTACK ||
2298 act == DTRACEACT_JSTACK) {
2299 if (dt_print_ustack(dtp, fp, NULL,
2300 addr, rec->dtrd_arg) < 0)
2301 return (-1);
2302 goto nextrec;
2303 }
2304
2305 if (act == DTRACEACT_SYM) {
2306 if (dt_print_sym(dtp, fp, NULL, addr) < 0)
2307 return (-1);
2308 goto nextrec;
2309 }
2310
2311 if (act == DTRACEACT_MOD) {
2312 if (dt_print_mod(dtp, fp, NULL, addr) < 0)
2313 return (-1);
2314 goto nextrec;
2315 }
2316
2317 if (act == DTRACEACT_USYM || act == DTRACEACT_UADDR) {
2318 if (dt_print_usym(dtp, fp, addr, act) < 0)
2319 return (-1);
2320 goto nextrec;
2321 }
2322
2323 if (act == DTRACEACT_UMOD) {
2324 if (dt_print_umod(dtp, fp, NULL, addr) < 0)
2325 return (-1);
2326 goto nextrec;
2327 }
2328
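			/*
			 * printf(), printa(), system() and freopen() all
			 * carry a format:  hand the remaining records to the
			 * appropriate formatting function, which tells us how
			 * many records it consumed.
			 */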
2329 if (DTRACEACT_ISPRINTFLIKE(act)) {
2330 void *fmtdata;
2331 int (*func)(dtrace_hdl_t *, FILE *, void *,
2332 const dtrace_probedata_t *,
2333 const dtrace_recdesc_t *, uint_t,
2334 const void *buf, size_t);
2335
2336 if ((fmtdata = dt_format_lookup(dtp,
2337 rec->dtrd_format)) == NULL)
2338 goto nofmt;
2339
2340 switch (act) {
2341 case DTRACEACT_PRINTF:
2342 func = dtrace_fprintf;
2343 break;
2344 case DTRACEACT_PRINTA:
2345 func = dtrace_fprinta;
2346 break;
2347 case DTRACEACT_SYSTEM:
2348 func = dtrace_system;
2349 break;
2350 case DTRACEACT_FREOPEN:
2351 func = dtrace_freopen;
2352 break;
2353 }
2354
2355 n = (*func)(dtp, fp, fmtdata, &data,
2356 rec, epd->dtepd_nrecs - i,
2357 (uchar_t *)buf->dtbd_data + offs,
2358 buf->dtbd_size - offs);
2359
2360 if (n < 0)
2361 return (-1); /* errno is set for us */
2362
2363 if (n > 0)
2364 i += n - 1;
2365 goto nextrec;
2366 }
2367
2368 /*
2369 * If this is a DIF expression, and the record has a
2370 * format set, this indicates we have a CTF type name
2371 * associated with the data and we should try to print
2372 * it out by type.
2373 */
2374 if (act == DTRACEACT_DIFEXPR) {
2375 const char *strdata = dt_strdata_lookup(dtp,
2376 rec->dtrd_format);
2377 if (strdata != NULL) {
2378 n = dtrace_print(dtp, fp, strdata,
2379 addr, rec->dtrd_size);
2380
2381 /*
2382 * dtrace_print() will return -1 on
2383 * error, or return the number of bytes
2384 * consumed. It will return 0 if the
2385 * type couldn't be determined, and we
2386 * should fall through to the normal
2387 * trace method.
2388 */
2389 if (n < 0)
2390 return (-1);
2391
2392 if (n > 0)
2393 goto nextrec;
2394 }
2395 }
2396
2397 nofmt:
2398 if (act == DTRACEACT_PRINTA) {
2399 dt_print_aggdata_t pd;
2400 dtrace_aggvarid_t *aggvars;
2401 int j, naggvars = 0;
2402 size_t size = ((epd->dtepd_nrecs - i) *
2403 sizeof (dtrace_aggvarid_t));
2404
2405 if ((aggvars = dt_alloc(dtp, size)) == NULL)
2406 return (-1);
2407
2408 /*
2409 * This might be a printa() with multiple
2410 * aggregation variables. We need to scan
2411 * forward through the records until we find
2412 * a record from a different statement.
2413 */
2414 for (j = i; j < epd->dtepd_nrecs; j++) {
2415 dtrace_recdesc_t *nrec;
2416 caddr_t naddr;
2417
2418 nrec = &epd->dtepd_rec[j];
2419
2420 if (nrec->dtrd_uarg != rec->dtrd_uarg)
2421 break;
2422
2423 if (nrec->dtrd_action != act) {
2424 return (dt_set_errno(dtp,
2425 EDT_BADAGG));
2426 }
2427
2428 naddr = buf->dtbd_data + offs +
2429 nrec->dtrd_offset;
2430
2431 aggvars[naggvars++] =
2432 /* LINTED - alignment */
2433 *((dtrace_aggvarid_t *)naddr);
2434 }
2435
2436 i = j - 1;
2437 bzero(&pd, sizeof (pd));
2438 pd.dtpa_dtp = dtp;
2439 pd.dtpa_fp = fp;
2440
2441 assert(naggvars >= 1);
2442
2443 if (naggvars == 1) {
2444 pd.dtpa_id = aggvars[0];
2445 dt_free(dtp, aggvars);
2446
2447 if (dt_printf(dtp, fp, "\n") < 0 ||
2448 dtrace_aggregate_walk_sorted(dtp,
2449 dt_print_agg, &pd) < 0)
2450 return (-1);
2451 goto nextrec;
2452 }
2453
2454 if (dt_printf(dtp, fp, "\n") < 0 ||
2455 dtrace_aggregate_walk_joined(dtp, aggvars,
2456 naggvars, dt_print_aggs, &pd) < 0) {
2457 dt_free(dtp, aggvars);
2458 return (-1);
2459 }
2460
2461 dt_free(dtp, aggvars);
2462 goto nextrec;
2463 }
2464
2465 if (act == DTRACEACT_TRACEMEM) {
2466 if (tracememsize == 0 ||
2467 tracememsize > rec->dtrd_size) {
2468 tracememsize = rec->dtrd_size;
2469 }
2470
2471 n = dt_print_bytes(dtp, fp, addr,
2472 tracememsize, -33, quiet, 1);
2473
2474 tracememsize = 0;
2475
2476 if (n < 0)
2477 return (-1);
2478
2479 goto nextrec;
2480 }
2481
2482 switch (rec->dtrd_size) {
2483 case sizeof (uint64_t):
2484 n = dt_printf(dtp, fp,
2485 quiet ? "%lld" : " %16lld",
2486 /* LINTED - alignment */
2487 *((unsigned long long *)addr));
2488 break;
2489 case sizeof (uint32_t):
2490 n = dt_printf(dtp, fp, quiet ? "%d" : " %8d",
2491 /* LINTED - alignment */
2492 *((uint32_t *)addr));
2493 break;
2494 case sizeof (uint16_t):
2495 n = dt_printf(dtp, fp, quiet ? "%d" : " %5d",
2496 /* LINTED - alignment */
2497 *((uint16_t *)addr));
2498 break;
2499 case sizeof (uint8_t):
2500 n = dt_printf(dtp, fp, quiet ? "%d" : " %3d",
2501 *((uint8_t *)addr));
2502 break;
2503 default:
2504 n = dt_print_bytes(dtp, fp, addr,
2505 rec->dtrd_size, -33, quiet, 0);
2506 break;
2507 }
2508
2509 if (n < 0)
2510 return (-1); /* errno is set for us */
2511
2512 nextrec:
2513 if (dt_buffered_flush(dtp, &data, rec, NULL, 0) < 0)
2514 return (-1); /* errno is set for us */
2515 }
2516
2517 /*
2518 * Call the record callback with a NULL record to indicate
2519 * that we're done processing this EPID.
2520 */
2521 rval = (*rfunc)(&data, NULL, arg);
2522 nextepid:
2523 offs += epd->dtepd_size;
2524 dtp->dt_last_epid = id;
2525 if (just_one) {
2526 buf->dtbd_oldest = offs;
2527 break;
2528 }
2529 }
2530
2531 dtp->dt_flow = data.dtpda_flow;
2532 dtp->dt_indent = data.dtpda_indent;
2533 dtp->dt_prefix = data.dtpda_prefix;
2534
2535 if ((drops = buf->dtbd_drops) == 0)
2536 return (0);
2537
2538 /*
2539 * Explicitly zero the drops to prevent us from processing them again.
2540 */
2541 buf->dtbd_drops = 0;
2542
2543 return (dt_handle_cpudrop(dtp, cpu, DTRACEDROP_PRINCIPAL, drops));
2544 }
2545
2546 /*
2547  * Reduce memory usage by shrinking the buffer if it's less than half full.
2548 * Note, we need to preserve the alignment of the data at dtbd_oldest, which is
2549 * only 4-byte aligned.
2550 */
2551 static void
2552 dt_realloc_buf(dtrace_hdl_t *dtp, dtrace_bufdesc_t *buf, int cursize)
2553 {
2554 uint64_t used = buf->dtbd_size - buf->dtbd_oldest;
2555 if (used < cursize / 2) {
2556 int misalign = buf->dtbd_oldest & (sizeof (uint64_t) - 1);
2557 char *newdata = dt_alloc(dtp, used + misalign);
2558 if (newdata == NULL)
2559 return;
2560 bzero(newdata, misalign);
2561 bcopy(buf->dtbd_data + buf->dtbd_oldest,
2562 newdata + misalign, used);
2563 dt_free(dtp, buf->dtbd_data);
2564 buf->dtbd_oldest = misalign;
2565 buf->dtbd_size = used + misalign;
2566 buf->dtbd_data = newdata;
2567 }
2568 }
2569
2570 /*
2571 * If the ring buffer has wrapped, the data is not in order. Rearrange it
2572 * so that it is. Note, we need to preserve the alignment of the data at
2573 * dtbd_oldest, which is only 4-byte aligned.
2574 */
2575 static int
2576 dt_unring_buf(dtrace_hdl_t *dtp, dtrace_bufdesc_t *buf)
2577 {
2578 int misalign;
2579 char *newdata, *ndp;
2580
2581 if (buf->dtbd_oldest == 0)
2582 return (0);
2583
2584 misalign = buf->dtbd_oldest & (sizeof (uint64_t) - 1);
2585 newdata = ndp = dt_alloc(dtp, buf->dtbd_size + misalign);
2586
2587 if (newdata == NULL)
2588 return (-1);
2589
2590 assert(0 == (buf->dtbd_size & (sizeof (uint64_t) - 1)));
2591
2592 bzero(ndp, misalign);
2593 ndp += misalign;
2594
2595 bcopy(buf->dtbd_data + buf->dtbd_oldest, ndp,
2596 buf->dtbd_size - buf->dtbd_oldest);
2597 ndp += buf->dtbd_size - buf->dtbd_oldest;
2598
2599 bcopy(buf->dtbd_data, ndp, buf->dtbd_oldest);
2600
2601 dt_free(dtp, buf->dtbd_data);
2602 buf->dtbd_oldest = misalign;
2603 buf->dtbd_data = newdata;
2604 buf->dtbd_size += misalign;
2605
2606 return (0);
2607 }
2608
2609 static void
2610 dt_put_buf(dtrace_hdl_t *dtp, dtrace_bufdesc_t *buf)
2611 {
2612 dt_free(dtp, buf->dtbd_data);
2613 dt_free(dtp, buf);
2614 }
2615
2616 /*
2617  * Returns 0 on success, in which case *bufp is filled in with the retrieved
2618  * data, or set to NULL if there is no data for this CPU.
2619 * Returns -1 on failure and sets dt_errno.
2620 */
2621 static int
2622 dt_get_buf(dtrace_hdl_t *dtp, int cpu, dtrace_bufdesc_t **bufp)
2623 {
2624 dtrace_optval_t size;
2625 dtrace_bufdesc_t *buf = dt_zalloc(dtp, sizeof (*buf));
2626 int error;
2627
2628 if (buf == NULL)
2629 return (-1);
2630
2631 (void) dtrace_getopt(dtp, "bufsize", &size);
2632 buf->dtbd_data = dt_alloc(dtp, size);
2633 if (buf->dtbd_data == NULL) {
2634 dt_free(dtp, buf);
2635 return (-1);
2636 }
2637 buf->dtbd_size = size;
2638 buf->dtbd_cpu = cpu;
2639
2640 if (dt_ioctl(dtp, DTRACEIOC_BUFSNAP, buf) == -1) {
2641 dt_put_buf(dtp, buf);
2642 /*
2643 * If we failed with ENOENT, it may be because the
2644 * CPU was unconfigured -- this is okay. Any other
2645 * error, however, is unexpected.
2646 */
2647 if (errno == ENOENT) {
2648 *bufp = NULL;
2649 return (0);
2650 }
2651
2652 return (dt_set_errno(dtp, errno));
2653 }
2654
2655 error = dt_unring_buf(dtp, buf);
2656 if (error != 0) {
2657 dt_put_buf(dtp, buf);
2658 return (error);
2659 }
2660 dt_realloc_buf(dtp, buf, size);
2661
2662 *bufp = buf;
2663 return (0);
2664 }
2665
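/*
 * State used to interpose on the consumer callbacks and the ERROR handler
 * while BEGIN records are being processed; see dt_consume_begin() below.
 */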
2666 typedef struct dt_begin {
2667 dtrace_consume_probe_f *dtbgn_probefunc;
2668 dtrace_consume_rec_f *dtbgn_recfunc;
2669 void *dtbgn_arg;
2670 dtrace_handle_err_f *dtbgn_errhdlr;
2671 void *dtbgn_errarg;
2672 int dtbgn_beginonly;
2673 } dt_begin_t;
2674
2675 static int
2676 dt_consume_begin_probe(const dtrace_probedata_t *data, void *arg)
2677 {
2678 dt_begin_t *begin = arg;
2679 dtrace_probedesc_t *pd = data->dtpda_pdesc;
2680
2681 int r1 = (strcmp(pd->dtpd_provider, "dtrace") == 0);
2682 int r2 = (strcmp(pd->dtpd_name, "BEGIN") == 0);
2683
2684 if (begin->dtbgn_beginonly) {
2685 if (!(r1 && r2))
2686 return (DTRACE_CONSUME_NEXT);
2687 } else {
2688 if (r1 && r2)
2689 return (DTRACE_CONSUME_NEXT);
2690 }
2691
2692 /*
2693 * We have a record that we're interested in. Now call the underlying
2694 * probe function...
2695 */
2696 return (begin->dtbgn_probefunc(data, begin->dtbgn_arg));
2697 }
2698
2699 static int
2700 dt_consume_begin_record(const dtrace_probedata_t *data,
2701 const dtrace_recdesc_t *rec, void *arg)
2702 {
2703 dt_begin_t *begin = arg;
2704
2705 return (begin->dtbgn_recfunc(data, rec, begin->dtbgn_arg));
2706 }
2707
2708 static int
2709 dt_consume_begin_error(const dtrace_errdata_t *data, void *arg)
2710 {
2711 dt_begin_t *begin = (dt_begin_t *)arg;
2712 dtrace_probedesc_t *pd = data->dteda_pdesc;
2713
2714 int r1 = (strcmp(pd->dtpd_provider, "dtrace") == 0);
2715 int r2 = (strcmp(pd->dtpd_name, "BEGIN") == 0);
2716
2717 if (begin->dtbgn_beginonly) {
2718 if (!(r1 && r2))
2719 return (DTRACE_HANDLE_OK);
2720 } else {
2721 if (r1 && r2)
2722 return (DTRACE_HANDLE_OK);
2723 }
2724
2725 return (begin->dtbgn_errhdlr(data, begin->dtbgn_errarg));
2726 }
2727
2728 static int
2729 dt_consume_begin(dtrace_hdl_t *dtp, FILE *fp,
2730 dtrace_consume_probe_f *pf, dtrace_consume_rec_f *rf, void *arg)
2731 {
2732 /*
2733 * There's this idea that the BEGIN probe should be processed before
2734 * everything else, and that the END probe should be processed after
2735  * everything else. In the common case, this is pretty easy to deal
2736 * with. However, a situation may arise where the BEGIN enabling and
2737 * END enabling are on the same CPU, and some enabling in the middle
2738 * occurred on a different CPU. To deal with this (blech!) we need to
2739 * consume the BEGIN buffer up until the end of the BEGIN probe, and
2740 * then set it aside. We will then process every other CPU, and then
2741 * we'll return to the BEGIN CPU and process the rest of the data
2742 * (which will inevitably include the END probe, if any). Making this
2743 * even more complicated (!) is the library's ERROR enabling. Because
2744  * this enabling is processed before we even get into the consume callback,
2745  * any ERROR firing would result in the library's ERROR enabling
2746 * being processed twice -- once in our first pass (for BEGIN probes),
2747 * and again in our second pass (for everything but BEGIN probes). To
2748 * deal with this, we interpose on the ERROR handler to assure that we
2749 * only process ERROR enablings induced by BEGIN enablings in the
2750 * first pass, and that we only process ERROR enablings _not_ induced
2751 * by BEGIN enablings in the second pass.
2752 */
2753
2754 dt_begin_t begin;
2755 processorid_t cpu = dtp->dt_beganon;
2756 int rval, i;
2757 static int max_ncpus;
2758 dtrace_bufdesc_t *buf;
2759
2760 dtp->dt_beganon = -1;
2761
2762 if (dt_get_buf(dtp, cpu, &buf) != 0)
2763 return (-1);
2764 if (buf == NULL)
2765 return (0);
2766
2767 if (!dtp->dt_stopped || buf->dtbd_cpu != dtp->dt_endedon) {
2768 /*
2769 * This is the simple case. We're either not stopped, or if
2770 * we are, we actually processed any END probes on another
2771 * CPU. We can simply consume this buffer and return.
2772 */
2773 rval = dt_consume_cpu(dtp, fp, cpu, buf, B_FALSE,
2774 pf, rf, arg);
2775 dt_put_buf(dtp, buf);
2776 return (rval);
2777 }
2778
2779 begin.dtbgn_probefunc = pf;
2780 begin.dtbgn_recfunc = rf;
2781 begin.dtbgn_arg = arg;
2782 begin.dtbgn_beginonly = 1;
2783
2784 /*
2785 * We need to interpose on the ERROR handler to be sure that we
2786 * only process ERRORs induced by BEGIN.
2787 */
2788 begin.dtbgn_errhdlr = dtp->dt_errhdlr;
2789 begin.dtbgn_errarg = dtp->dt_errarg;
2790 dtp->dt_errhdlr = dt_consume_begin_error;
2791 dtp->dt_errarg = &begin;
2792
2793 rval = dt_consume_cpu(dtp, fp, cpu, buf, B_FALSE,
2794 dt_consume_begin_probe, dt_consume_begin_record, &begin);
2795
2796 dtp->dt_errhdlr = begin.dtbgn_errhdlr;
2797 dtp->dt_errarg = begin.dtbgn_errarg;
2798
2799 if (rval != 0) {
2800 dt_put_buf(dtp, buf);
2801 return (rval);
2802 }
2803
2804 if (max_ncpus == 0)
2805 max_ncpus = dt_sysconf(dtp, _SC_CPUID_MAX) + 1;
2806
2807 for (i = 0; i < max_ncpus; i++) {
2808 dtrace_bufdesc_t *nbuf;
2809 if (i == cpu)
2810 continue;
2811
2812 if (dt_get_buf(dtp, i, &nbuf) != 0) {
2813 dt_put_buf(dtp, buf);
2814 return (-1);
2815 }
2816 if (nbuf == NULL)
2817 continue;
2818
2819 rval = dt_consume_cpu(dtp, fp, i, nbuf, B_FALSE,
2820 pf, rf, arg);
2821 dt_put_buf(dtp, nbuf);
2822 if (rval != 0) {
2823 dt_put_buf(dtp, buf);
2824 return (rval);
2825 }
2826 }
2827
2828 /*
2829 * Okay -- we're done with the other buffers. Now we want to
2830 * reconsume the first buffer -- but this time we're looking for
2831 * everything _but_ BEGIN. And of course, in order to only consume
2832 * those ERRORs _not_ associated with BEGIN, we need to reinstall our
2833 * ERROR interposition function...
2834 */
2835 begin.dtbgn_beginonly = 0;
2836
2837 assert(begin.dtbgn_errhdlr == dtp->dt_errhdlr);
2838 assert(begin.dtbgn_errarg == dtp->dt_errarg);
2839 dtp->dt_errhdlr = dt_consume_begin_error;
2840 dtp->dt_errarg = &begin;
2841
2842 rval = dt_consume_cpu(dtp, fp, cpu, buf, B_FALSE,
2843 dt_consume_begin_probe, dt_consume_begin_record, &begin);
2844
2845 dtp->dt_errhdlr = begin.dtbgn_errhdlr;
2846 dtp->dt_errarg = begin.dtbgn_errarg;
2847
2848 return (rval);
2849 }
2850
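/*
 * Value function for the buffer priority queue used in temporal mode:
 * returns the timestamp of the oldest record remaining in the buffer, or the
 * time the buffer was snapshotted if no records remain.
 */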
2851 /* ARGSUSED */
2852 static uint64_t
2853 dt_buf_oldest(void *elem, void *arg)
2854 {
2855 dtrace_bufdesc_t *buf = elem;
2856 size_t offs = buf->dtbd_oldest;
2857
2858 while (offs < buf->dtbd_size) {
2859 dtrace_rechdr_t *dtrh =
2860 /* LINTED - alignment */
2861 (dtrace_rechdr_t *)(buf->dtbd_data + offs);
2862 if (dtrh->dtrh_epid == DTRACE_EPIDNONE) {
2863 offs += sizeof (dtrace_epid_t);
2864 } else {
2865 return (DTRACE_RECORD_LOAD_TIMESTAMP(dtrh));
2866 }
2867 }
2868
2869 /* There are no records left; use the time the buffer was retrieved. */
2870 return (buf->dtbd_timestamp);
2871 }
2872
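/*
 * Consume data from all CPUs.  This is typically driven from a consumer's
 * main loop, roughly as in the following sketch (where "chew" and "chewrec"
 * are caller-supplied probe and record callbacks -- the names are
 * illustrative, not part of this interface):
 *
 *	do {
 *		dtrace_sleep(dtp);
 *		(void) dtrace_work(dtp, stdout, chew, chewrec, NULL);
 *	} while (!done);
 *
 * dtrace_work() in turn calls dtrace_consume() with the same callbacks.
 */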
2873 int
2874 dtrace_consume(dtrace_hdl_t *dtp, FILE *fp,
2875 dtrace_consume_probe_f *pf, dtrace_consume_rec_f *rf, void *arg)
2876 {
2877 dtrace_optval_t size;
2878 static int max_ncpus;
2879 int i, rval;
2880 dtrace_optval_t interval = dtp->dt_options[DTRACEOPT_SWITCHRATE];
2881 hrtime_t now = gethrtime();
2882
2883 if (dtp->dt_lastswitch != 0) {
2884 if (now - dtp->dt_lastswitch < interval)
2885 return (0);
2886
2887 dtp->dt_lastswitch += interval;
2888 } else {
2889 dtp->dt_lastswitch = now;
2890 }
2891
2892 if (!dtp->dt_active)
2893 return (dt_set_errno(dtp, EINVAL));
2894
2895 if (max_ncpus == 0)
2896 max_ncpus = dt_sysconf(dtp, _SC_CPUID_MAX) + 1;
2897
2898 if (pf == NULL)
2899 pf = (dtrace_consume_probe_f *)dt_nullprobe;
2900
2901 if (rf == NULL)
2902 rf = (dtrace_consume_rec_f *)dt_nullrec;
2903
2904 if (dtp->dt_options[DTRACEOPT_TEMPORAL] == DTRACEOPT_UNSET) {
2905 /*
2906 * The output will not be in the order it was traced. Rather,
2907 * we will consume all of the data from each CPU's buffer in
2908 * turn. We apply special handling for the records from BEGIN
2909 * and END probes so that they are consumed first and last,
2910 * respectively.
2911 *
2912 * If we have just begun, we want to first process the CPU that
2913 * executed the BEGIN probe (if any).
2914 */
2915 if (dtp->dt_active && dtp->dt_beganon != -1 &&
2916 (rval = dt_consume_begin(dtp, fp, pf, rf, arg)) != 0)
2917 return (rval);
2918
2919 for (i = 0; i < max_ncpus; i++) {
2920 dtrace_bufdesc_t *buf;
2921
2922 /*
2923 * If we have stopped, we want to process the CPU on
2924 * which the END probe was processed only _after_ we
2925 * have processed everything else.
2926 */
2927 if (dtp->dt_stopped && (i == dtp->dt_endedon))
2928 continue;
2929
2930 if (dt_get_buf(dtp, i, &buf) != 0)
2931 return (-1);
2932 if (buf == NULL)
2933 continue;
2934
2935 dtp->dt_flow = 0;
2936 dtp->dt_indent = 0;
2937 dtp->dt_prefix = NULL;
2938 rval = dt_consume_cpu(dtp, fp, i,
2939 buf, B_FALSE, pf, rf, arg);
2940 dt_put_buf(dtp, buf);
2941 if (rval != 0)
2942 return (rval);
2943 }
2944 if (dtp->dt_stopped) {
2945 dtrace_bufdesc_t *buf;
2946
2947 if (dt_get_buf(dtp, dtp->dt_endedon, &buf) != 0)
2948 return (-1);
2949 if (buf == NULL)
2950 return (0);
2951
2952 rval = dt_consume_cpu(dtp, fp, dtp->dt_endedon,
2953 buf, B_FALSE, pf, rf, arg);
2954 dt_put_buf(dtp, buf);
2955 return (rval);
2956 }
2957 } else {
2958 /*
2959 * The output will be in the order it was traced (or for
2960 * speculations, when it was committed). We retrieve a buffer
2961 * from each CPU and put it into a priority queue, which sorts
2962 * based on the first entry in the buffer. This is sufficient
2963 * because entries within a buffer are already sorted.
2964 *
2965 * We then consume records one at a time, always consuming the
2966 * oldest record, as determined by the priority queue. When
2967 * we reach the end of the time covered by these buffers,
2968 * we need to stop and retrieve more records on the next pass.
2969 * The kernel tells us the time covered by each buffer, in
2970 * dtbd_timestamp. The first buffer's timestamp tells us the
2971 * time covered by all buffers, as subsequently retrieved
2972 * buffers will cover to a more recent time.
2973 */
2974
2975 uint64_t *drops = alloca(max_ncpus * sizeof (uint64_t));
2976 uint64_t first_timestamp = 0;
2977 uint_t cookie = 0;
2978 dtrace_bufdesc_t *buf;
2979
2980 bzero(drops, max_ncpus * sizeof (uint64_t));
2981
2982 if (dtp->dt_bufq == NULL) {
2983 dtp->dt_bufq = dt_pq_init(dtp, max_ncpus * 2,
2984 dt_buf_oldest, NULL);
2985 if (dtp->dt_bufq == NULL) /* ENOMEM */
2986 return (-1);
2987 }
2988
2989 /* Retrieve data from each CPU. */
2990 (void) dtrace_getopt(dtp, "bufsize", &size);
2991 for (i = 0; i < max_ncpus; i++) {
2992 dtrace_bufdesc_t *buf;
2993
2994 if (dt_get_buf(dtp, i, &buf) != 0)
2995 return (-1);
2996 if (buf != NULL) {
2997 if (first_timestamp == 0)
2998 first_timestamp = buf->dtbd_timestamp;
2999 assert(buf->dtbd_timestamp >= first_timestamp);
3000
3001 dt_pq_insert(dtp->dt_bufq, buf);
3002 drops[i] = buf->dtbd_drops;
3003 buf->dtbd_drops = 0;
3004 }
3005 }
3006
3007 /* Consume records. */
3008 for (;;) {
3009 dtrace_bufdesc_t *buf = dt_pq_pop(dtp->dt_bufq);
3010 uint64_t timestamp;
3011
3012 if (buf == NULL)
3013 break;
3014
3015 timestamp = dt_buf_oldest(buf, dtp);
3016 if (timestamp == buf->dtbd_timestamp) {
3017 /*
3018 * We've reached the end of the time covered
3019 * by this buffer. If this is the oldest
3020 * buffer, we must do another pass
3021 * to retrieve more data.
3022 */
3023 dt_put_buf(dtp, buf);
3024 if (timestamp == first_timestamp &&
3025 !dtp->dt_stopped)
3026 break;
3027 continue;
3028 }
3029 assert(timestamp >= dtp->dt_last_timestamp);
3030 dtp->dt_last_timestamp = timestamp;
3031
3032 if ((rval = dt_consume_cpu(dtp, fp,
3033 buf->dtbd_cpu, buf, B_TRUE, pf, rf, arg)) != 0)
3034 return (rval);
3035 dt_pq_insert(dtp->dt_bufq, buf);
3036 }
3037
3038 /* Consume drops. */
3039 for (i = 0; i < max_ncpus; i++) {
3040 if (drops[i] != 0) {
3041 int error = dt_handle_cpudrop(dtp, i,
3042 DTRACEDROP_PRINCIPAL, drops[i]);
3043 if (error != 0)
3044 return (error);
3045 }
3046 }
3047
3048 /*
3049 * Reduce memory usage by re-allocating smaller buffers
3050 * for the "remnants".
3051 */
3052 		while ((buf = dt_pq_walk(dtp->dt_bufq, &cookie)) != NULL)
3053 dt_realloc_buf(dtp, buf, buf->dtbd_size);
3054 }
3055
3056 return (0);
3057 }
3058