1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* 23 * Copyright 2008 Sun Microsystems, Inc. All rights reserved. 24 * Use is subject to license terms. 25 */ 26 27 /* 28 * Copyright (c) 2016, Joyent, Inc. All rights reserved. 29 * Copyright (c) 2012 by Delphix. All rights reserved. 30 */ 31 32 #include <stdlib.h> 33 #include <strings.h> 34 #include <errno.h> 35 #include <unistd.h> 36 #include <dt_impl.h> 37 #include <assert.h> 38 #include <alloca.h> 39 #include <limits.h> 40 41 #define DTRACE_AHASHSIZE 32779 /* big 'ol prime */ 42 43 /* 44 * Because qsort(3C) does not allow an argument to be passed to a comparison 45 * function, the variables that affect comparison must regrettably be global; 46 * they are protected by a global static lock, dt_qsort_lock. 47 */ 48 static pthread_mutex_t dt_qsort_lock = PTHREAD_MUTEX_INITIALIZER; 49 50 static int dt_revsort; 51 static int dt_keysort; 52 static int dt_keypos; 53 54 #define DT_LESSTHAN (dt_revsort == 0 ? -1 : 1) 55 #define DT_GREATERTHAN (dt_revsort == 0 ? 
1 : -1) 56 57 static void 58 dt_aggregate_count(int64_t *existing, int64_t *new, size_t size) 59 { 60 int i; 61 62 for (i = 0; i < size / sizeof (int64_t); i++) 63 existing[i] = existing[i] + new[i]; 64 } 65 66 static int 67 dt_aggregate_countcmp(int64_t *lhs, int64_t *rhs) 68 { 69 int64_t lvar = *lhs; 70 int64_t rvar = *rhs; 71 72 if (lvar < rvar) 73 return (DT_LESSTHAN); 74 75 if (lvar > rvar) 76 return (DT_GREATERTHAN); 77 78 return (0); 79 } 80 81 /*ARGSUSED*/ 82 static void 83 dt_aggregate_min(int64_t *existing, int64_t *new, size_t size) 84 { 85 if (*new < *existing) 86 *existing = *new; 87 } 88 89 /*ARGSUSED*/ 90 static void 91 dt_aggregate_max(int64_t *existing, int64_t *new, size_t size) 92 { 93 if (*new > *existing) 94 *existing = *new; 95 } 96 97 static int 98 dt_aggregate_averagecmp(int64_t *lhs, int64_t *rhs) 99 { 100 int64_t lavg = lhs[0] ? (lhs[1] / lhs[0]) : 0; 101 int64_t ravg = rhs[0] ? (rhs[1] / rhs[0]) : 0; 102 103 if (lavg < ravg) 104 return (DT_LESSTHAN); 105 106 if (lavg > ravg) 107 return (DT_GREATERTHAN); 108 109 return (0); 110 } 111 112 static int 113 dt_aggregate_stddevcmp(int64_t *lhs, int64_t *rhs) 114 { 115 uint64_t lsd = dt_stddev((uint64_t *)lhs, 1); 116 uint64_t rsd = dt_stddev((uint64_t *)rhs, 1); 117 118 if (lsd < rsd) 119 return (DT_LESSTHAN); 120 121 if (lsd > rsd) 122 return (DT_GREATERTHAN); 123 124 return (0); 125 } 126 127 /*ARGSUSED*/ 128 static void 129 dt_aggregate_lquantize(int64_t *existing, int64_t *new, size_t size) 130 { 131 int64_t arg = *existing++; 132 uint16_t levels = DTRACE_LQUANTIZE_LEVELS(arg); 133 int i; 134 135 for (i = 0; i <= levels + 1; i++) 136 existing[i] = existing[i] + new[i + 1]; 137 } 138 139 static long double 140 dt_aggregate_lquantizedsum(int64_t *lquanta) 141 { 142 int64_t arg = *lquanta++; 143 int32_t base = DTRACE_LQUANTIZE_BASE(arg); 144 uint16_t step = DTRACE_LQUANTIZE_STEP(arg); 145 uint16_t levels = DTRACE_LQUANTIZE_LEVELS(arg), i; 146 long double total = (long double)lquanta[0] * (long 
double)(base - 1); 147 148 for (i = 0; i < levels; base += step, i++) 149 total += (long double)lquanta[i + 1] * (long double)base; 150 151 return (total + (long double)lquanta[levels + 1] * 152 (long double)(base + 1)); 153 } 154 155 static int64_t 156 dt_aggregate_lquantizedzero(int64_t *lquanta) 157 { 158 int64_t arg = *lquanta++; 159 int32_t base = DTRACE_LQUANTIZE_BASE(arg); 160 uint16_t step = DTRACE_LQUANTIZE_STEP(arg); 161 uint16_t levels = DTRACE_LQUANTIZE_LEVELS(arg), i; 162 163 if (base - 1 == 0) 164 return (lquanta[0]); 165 166 for (i = 0; i < levels; base += step, i++) { 167 if (base != 0) 168 continue; 169 170 return (lquanta[i + 1]); 171 } 172 173 if (base + 1 == 0) 174 return (lquanta[levels + 1]); 175 176 return (0); 177 } 178 179 static int 180 dt_aggregate_lquantizedcmp(int64_t *lhs, int64_t *rhs) 181 { 182 long double lsum = dt_aggregate_lquantizedsum(lhs); 183 long double rsum = dt_aggregate_lquantizedsum(rhs); 184 int64_t lzero, rzero; 185 186 if (lsum < rsum) 187 return (DT_LESSTHAN); 188 189 if (lsum > rsum) 190 return (DT_GREATERTHAN); 191 192 /* 193 * If they're both equal, then we will compare based on the weights at 194 * zero. If the weights at zero are equal (or if zero is not within 195 * the range of the linear quantization), then this will be judged a 196 * tie and will be resolved based on the key comparison. 
197 */ 198 lzero = dt_aggregate_lquantizedzero(lhs); 199 rzero = dt_aggregate_lquantizedzero(rhs); 200 201 if (lzero < rzero) 202 return (DT_LESSTHAN); 203 204 if (lzero > rzero) 205 return (DT_GREATERTHAN); 206 207 return (0); 208 } 209 210 static void 211 dt_aggregate_llquantize(int64_t *existing, int64_t *new, size_t size) 212 { 213 int i; 214 215 for (i = 1; i < size / sizeof (int64_t); i++) 216 existing[i] = existing[i] + new[i]; 217 } 218 219 static long double 220 dt_aggregate_llquantizedsum(int64_t *llquanta) 221 { 222 int64_t arg = *llquanta++; 223 uint16_t factor = DTRACE_LLQUANTIZE_FACTOR(arg); 224 uint16_t low = DTRACE_LLQUANTIZE_LOW(arg); 225 uint16_t high = DTRACE_LLQUANTIZE_HIGH(arg); 226 uint16_t nsteps = DTRACE_LLQUANTIZE_NSTEP(arg); 227 int bin = 0, order; 228 int64_t value = 1, next, step; 229 long double total; 230 231 assert(nsteps >= factor); 232 assert(nsteps % factor == 0); 233 234 for (order = 0; order < low; order++) 235 value *= factor; 236 237 total = (long double)llquanta[bin++] * (long double)(value - 1); 238 239 next = value * factor; 240 step = next > nsteps ? next / nsteps : 1; 241 242 while (order <= high) { 243 assert(value < next); 244 total += (long double)llquanta[bin++] * (long double)(value); 245 246 if ((value += step) != next) 247 continue; 248 249 next = value * factor; 250 step = next > nsteps ? next / nsteps : 1; 251 order++; 252 } 253 254 return (total + (long double)llquanta[bin] * (long double)value); 255 } 256 257 static int 258 dt_aggregate_llquantizedcmp(int64_t *lhs, int64_t *rhs) 259 { 260 long double lsum = dt_aggregate_llquantizedsum(lhs); 261 long double rsum = dt_aggregate_llquantizedsum(rhs); 262 int64_t lzero, rzero; 263 264 if (lsum < rsum) 265 return (DT_LESSTHAN); 266 267 if (lsum > rsum) 268 return (DT_GREATERTHAN); 269 270 /* 271 * If they're both equal, then we will compare based on the weights at 272 * zero. 
If the weights at zero are equal, then this will be judged a 273 * tie and will be resolved based on the key comparison. 274 */ 275 lzero = lhs[1]; 276 rzero = rhs[1]; 277 278 if (lzero < rzero) 279 return (DT_LESSTHAN); 280 281 if (lzero > rzero) 282 return (DT_GREATERTHAN); 283 284 return (0); 285 } 286 287 static int 288 dt_aggregate_quantizedcmp(int64_t *lhs, int64_t *rhs) 289 { 290 int nbuckets = DTRACE_QUANTIZE_NBUCKETS, i; 291 long double ltotal = 0, rtotal = 0; 292 int64_t lzero, rzero; 293 294 for (i = 0; i < nbuckets; i++) { 295 int64_t bucketval = DTRACE_QUANTIZE_BUCKETVAL(i); 296 297 if (bucketval == 0) { 298 lzero = lhs[i]; 299 rzero = rhs[i]; 300 } 301 302 ltotal += (long double)bucketval * (long double)lhs[i]; 303 rtotal += (long double)bucketval * (long double)rhs[i]; 304 } 305 306 if (ltotal < rtotal) 307 return (DT_LESSTHAN); 308 309 if (ltotal > rtotal) 310 return (DT_GREATERTHAN); 311 312 /* 313 * If they're both equal, then we will compare based on the weights at 314 * zero. If the weights at zero are equal, then this will be judged a 315 * tie and will be resolved based on the key comparison. 
316 */ 317 if (lzero < rzero) 318 return (DT_LESSTHAN); 319 320 if (lzero > rzero) 321 return (DT_GREATERTHAN); 322 323 return (0); 324 } 325 326 static void 327 dt_aggregate_usym(dtrace_hdl_t *dtp, uint64_t *data) 328 { 329 uint64_t pid = data[0]; 330 uint64_t *pc = &data[1]; 331 struct ps_prochandle *P; 332 GElf_Sym sym; 333 334 if (dtp->dt_vector != NULL) 335 return; 336 337 if ((P = dt_proc_grab(dtp, pid, PGRAB_RDONLY | PGRAB_FORCE, 0)) == NULL) 338 return; 339 340 dt_proc_lock(dtp, P); 341 342 if (Plookup_by_addr(P, *pc, NULL, 0, &sym) == 0) 343 *pc = sym.st_value; 344 345 dt_proc_unlock(dtp, P); 346 dt_proc_release(dtp, P); 347 } 348 349 static void 350 dt_aggregate_umod(dtrace_hdl_t *dtp, uint64_t *data) 351 { 352 uint64_t pid = data[0]; 353 uint64_t *pc = &data[1]; 354 struct ps_prochandle *P; 355 const prmap_t *map; 356 357 if (dtp->dt_vector != NULL) 358 return; 359 360 if ((P = dt_proc_grab(dtp, pid, PGRAB_RDONLY | PGRAB_FORCE, 0)) == NULL) 361 return; 362 363 dt_proc_lock(dtp, P); 364 365 if ((map = Paddr_to_map(P, *pc)) != NULL) 366 *pc = map->pr_vaddr; 367 368 dt_proc_unlock(dtp, P); 369 dt_proc_release(dtp, P); 370 } 371 372 static void 373 dt_aggregate_sym(dtrace_hdl_t *dtp, uint64_t *data) 374 { 375 GElf_Sym sym; 376 uint64_t *pc = data; 377 378 if (dtrace_lookup_by_addr(dtp, *pc, &sym, NULL) == 0) 379 *pc = sym.st_value; 380 } 381 382 static void 383 dt_aggregate_mod(dtrace_hdl_t *dtp, uint64_t *data) 384 { 385 uint64_t *pc = data; 386 dt_module_t *dmp; 387 388 if (dtp->dt_vector != NULL) { 389 /* 390 * We don't have a way of just getting the module for a 391 * vectored open, and it doesn't seem to be worth defining 392 * one. This means that use of mod() won't get true 393 * aggregation in the postmortem case (some modules may 394 * appear more than once in aggregation output). It seems 395 * unlikely that anyone will ever notice or care... 
396 */ 397 return; 398 } 399 400 for (dmp = dt_list_next(&dtp->dt_modlist); dmp != NULL; 401 dmp = dt_list_next(dmp)) { 402 if (*pc - dmp->dm_text_va < dmp->dm_text_size) { 403 *pc = dmp->dm_text_va; 404 return; 405 } 406 } 407 } 408 409 static dtrace_aggvarid_t 410 dt_aggregate_aggvarid(dt_ahashent_t *ent) 411 { 412 dtrace_aggdesc_t *agg = ent->dtahe_data.dtada_desc; 413 caddr_t data = ent->dtahe_data.dtada_data; 414 dtrace_recdesc_t *rec = agg->dtagd_rec; 415 416 /* 417 * First, we'll check the variable ID in the aggdesc. If it's valid, 418 * we'll return it. If not, we'll use the compiler-generated ID 419 * present as the first record. 420 */ 421 if (agg->dtagd_varid != DTRACE_AGGVARIDNONE) 422 return (agg->dtagd_varid); 423 424 agg->dtagd_varid = *((dtrace_aggvarid_t *)(uintptr_t)(data + 425 rec->dtrd_offset)); 426 427 return (agg->dtagd_varid); 428 } 429 430 431 static int 432 dt_aggregate_snap_cpu(dtrace_hdl_t *dtp, processorid_t cpu) 433 { 434 dtrace_epid_t id; 435 uint64_t hashval; 436 size_t offs, roffs, size, ndx; 437 int i, j, rval; 438 caddr_t addr, data; 439 dtrace_recdesc_t *rec; 440 dt_aggregate_t *agp = &dtp->dt_aggregate; 441 dtrace_aggdesc_t *agg; 442 dt_ahash_t *hash = &agp->dtat_hash; 443 dt_ahashent_t *h; 444 dtrace_bufdesc_t b = agp->dtat_buf, *buf = &b; 445 dtrace_aggdata_t *aggdata; 446 int flags = agp->dtat_flags; 447 448 buf->dtbd_cpu = cpu; 449 450 if (dt_ioctl(dtp, DTRACEIOC_AGGSNAP, buf) == -1) { 451 if (errno == ENOENT) { 452 /* 453 * If that failed with ENOENT, it may be because the 454 * CPU was unconfigured. This is okay; we'll just 455 * do nothing but return success. 
456 */ 457 return (0); 458 } 459 460 return (dt_set_errno(dtp, errno)); 461 } 462 463 if (buf->dtbd_drops != 0) { 464 if (dt_handle_cpudrop(dtp, cpu, 465 DTRACEDROP_AGGREGATION, buf->dtbd_drops) == -1) 466 return (-1); 467 } 468 469 if (buf->dtbd_size == 0) 470 return (0); 471 472 if (hash->dtah_hash == NULL) { 473 size_t size; 474 475 hash->dtah_size = DTRACE_AHASHSIZE; 476 size = hash->dtah_size * sizeof (dt_ahashent_t *); 477 478 if ((hash->dtah_hash = malloc(size)) == NULL) 479 return (dt_set_errno(dtp, EDT_NOMEM)); 480 481 bzero(hash->dtah_hash, size); 482 } 483 484 for (offs = 0; offs < buf->dtbd_size; ) { 485 /* 486 * We're guaranteed to have an ID. 487 */ 488 id = *((dtrace_epid_t *)((uintptr_t)buf->dtbd_data + 489 (uintptr_t)offs)); 490 491 if (id == DTRACE_AGGIDNONE) { 492 /* 493 * This is filler to assure proper alignment of the 494 * next record; we simply ignore it. 495 */ 496 offs += sizeof (id); 497 continue; 498 } 499 500 if ((rval = dt_aggid_lookup(dtp, id, &agg)) != 0) 501 return (rval); 502 503 addr = buf->dtbd_data + offs; 504 size = agg->dtagd_size; 505 hashval = 0; 506 507 for (j = 0; j < agg->dtagd_nrecs - 1; j++) { 508 rec = &agg->dtagd_rec[j]; 509 roffs = rec->dtrd_offset; 510 511 switch (rec->dtrd_action) { 512 case DTRACEACT_USYM: 513 dt_aggregate_usym(dtp, 514 /* LINTED - alignment */ 515 (uint64_t *)&addr[roffs]); 516 break; 517 518 case DTRACEACT_UMOD: 519 dt_aggregate_umod(dtp, 520 /* LINTED - alignment */ 521 (uint64_t *)&addr[roffs]); 522 break; 523 524 case DTRACEACT_SYM: 525 /* LINTED - alignment */ 526 dt_aggregate_sym(dtp, (uint64_t *)&addr[roffs]); 527 break; 528 529 case DTRACEACT_MOD: 530 /* LINTED - alignment */ 531 dt_aggregate_mod(dtp, (uint64_t *)&addr[roffs]); 532 break; 533 534 default: 535 break; 536 } 537 538 for (i = 0; i < rec->dtrd_size; i++) 539 hashval += addr[roffs + i]; 540 } 541 542 ndx = hashval % hash->dtah_size; 543 544 for (h = hash->dtah_hash[ndx]; h != NULL; h = h->dtahe_next) { 545 if (h->dtahe_hashval 
!= hashval) 546 continue; 547 548 if (h->dtahe_size != size) 549 continue; 550 551 aggdata = &h->dtahe_data; 552 data = aggdata->dtada_data; 553 554 for (j = 0; j < agg->dtagd_nrecs - 1; j++) { 555 rec = &agg->dtagd_rec[j]; 556 roffs = rec->dtrd_offset; 557 558 for (i = 0; i < rec->dtrd_size; i++) 559 if (addr[roffs + i] != data[roffs + i]) 560 goto hashnext; 561 } 562 563 /* 564 * We found it. Now we need to apply the aggregating 565 * action on the data here. 566 */ 567 rec = &agg->dtagd_rec[agg->dtagd_nrecs - 1]; 568 roffs = rec->dtrd_offset; 569 /* LINTED - alignment */ 570 h->dtahe_aggregate((int64_t *)&data[roffs], 571 /* LINTED - alignment */ 572 (int64_t *)&addr[roffs], rec->dtrd_size); 573 574 /* 575 * If we're keeping per CPU data, apply the aggregating 576 * action there as well. 577 */ 578 if (aggdata->dtada_percpu != NULL) { 579 data = aggdata->dtada_percpu[cpu]; 580 581 /* LINTED - alignment */ 582 h->dtahe_aggregate((int64_t *)data, 583 /* LINTED - alignment */ 584 (int64_t *)&addr[roffs], rec->dtrd_size); 585 } 586 587 goto bufnext; 588 hashnext: 589 continue; 590 } 591 592 /* 593 * If we're here, we couldn't find an entry for this record. 
594 */ 595 if ((h = malloc(sizeof (dt_ahashent_t))) == NULL) 596 return (dt_set_errno(dtp, EDT_NOMEM)); 597 bzero(h, sizeof (dt_ahashent_t)); 598 aggdata = &h->dtahe_data; 599 600 if ((aggdata->dtada_data = malloc(size)) == NULL) { 601 free(h); 602 return (dt_set_errno(dtp, EDT_NOMEM)); 603 } 604 605 bcopy(addr, aggdata->dtada_data, size); 606 aggdata->dtada_size = size; 607 aggdata->dtada_desc = agg; 608 aggdata->dtada_handle = dtp; 609 (void) dt_epid_lookup(dtp, agg->dtagd_epid, 610 &aggdata->dtada_edesc, &aggdata->dtada_pdesc); 611 aggdata->dtada_normal = 1; 612 613 h->dtahe_hashval = hashval; 614 h->dtahe_size = size; 615 (void) dt_aggregate_aggvarid(h); 616 617 rec = &agg->dtagd_rec[agg->dtagd_nrecs - 1]; 618 619 if (flags & DTRACE_A_PERCPU) { 620 int max_cpus = agp->dtat_maxcpu; 621 caddr_t *percpu = malloc(max_cpus * sizeof (caddr_t)); 622 623 if (percpu == NULL) { 624 free(aggdata->dtada_data); 625 free(h); 626 return (dt_set_errno(dtp, EDT_NOMEM)); 627 } 628 629 for (j = 0; j < max_cpus; j++) { 630 percpu[j] = malloc(rec->dtrd_size); 631 632 if (percpu[j] == NULL) { 633 while (--j >= 0) 634 free(percpu[j]); 635 636 free(aggdata->dtada_data); 637 free(h); 638 return (dt_set_errno(dtp, EDT_NOMEM)); 639 } 640 641 if (j == cpu) { 642 bcopy(&addr[rec->dtrd_offset], 643 percpu[j], rec->dtrd_size); 644 } else { 645 bzero(percpu[j], rec->dtrd_size); 646 } 647 } 648 649 aggdata->dtada_percpu = percpu; 650 } 651 652 switch (rec->dtrd_action) { 653 case DTRACEAGG_MIN: 654 h->dtahe_aggregate = dt_aggregate_min; 655 break; 656 657 case DTRACEAGG_MAX: 658 h->dtahe_aggregate = dt_aggregate_max; 659 break; 660 661 case DTRACEAGG_LQUANTIZE: 662 h->dtahe_aggregate = dt_aggregate_lquantize; 663 break; 664 665 case DTRACEAGG_LLQUANTIZE: 666 h->dtahe_aggregate = dt_aggregate_llquantize; 667 break; 668 669 case DTRACEAGG_COUNT: 670 case DTRACEAGG_SUM: 671 case DTRACEAGG_AVG: 672 case DTRACEAGG_STDDEV: 673 case DTRACEAGG_QUANTIZE: 674 h->dtahe_aggregate = dt_aggregate_count; 675 
break; 676 677 default: 678 return (dt_set_errno(dtp, EDT_BADAGG)); 679 } 680 681 if (hash->dtah_hash[ndx] != NULL) 682 hash->dtah_hash[ndx]->dtahe_prev = h; 683 684 h->dtahe_next = hash->dtah_hash[ndx]; 685 hash->dtah_hash[ndx] = h; 686 687 if (hash->dtah_all != NULL) 688 hash->dtah_all->dtahe_prevall = h; 689 690 h->dtahe_nextall = hash->dtah_all; 691 hash->dtah_all = h; 692 bufnext: 693 offs += agg->dtagd_size; 694 } 695 696 return (0); 697 } 698 699 int 700 dtrace_aggregate_snap(dtrace_hdl_t *dtp) 701 { 702 int i, rval; 703 dt_aggregate_t *agp = &dtp->dt_aggregate; 704 hrtime_t now = gethrtime(); 705 dtrace_optval_t interval = dtp->dt_options[DTRACEOPT_AGGRATE]; 706 707 if (dtp->dt_lastagg != 0) { 708 if (now - dtp->dt_lastagg < interval) 709 return (0); 710 711 dtp->dt_lastagg += interval; 712 } else { 713 dtp->dt_lastagg = now; 714 } 715 716 if (!dtp->dt_active) 717 return (dt_set_errno(dtp, EINVAL)); 718 719 if (agp->dtat_buf.dtbd_size == 0) 720 return (0); 721 722 for (i = 0; i < agp->dtat_ncpus; i++) { 723 if (rval = dt_aggregate_snap_cpu(dtp, agp->dtat_cpus[i])) 724 return (rval); 725 } 726 727 return (0); 728 } 729 730 static int 731 dt_aggregate_hashcmp(const void *lhs, const void *rhs) 732 { 733 dt_ahashent_t *lh = *((dt_ahashent_t **)lhs); 734 dt_ahashent_t *rh = *((dt_ahashent_t **)rhs); 735 dtrace_aggdesc_t *lagg = lh->dtahe_data.dtada_desc; 736 dtrace_aggdesc_t *ragg = rh->dtahe_data.dtada_desc; 737 738 if (lagg->dtagd_nrecs < ragg->dtagd_nrecs) 739 return (DT_LESSTHAN); 740 741 if (lagg->dtagd_nrecs > ragg->dtagd_nrecs) 742 return (DT_GREATERTHAN); 743 744 return (0); 745 } 746 747 static int 748 dt_aggregate_varcmp(const void *lhs, const void *rhs) 749 { 750 dt_ahashent_t *lh = *((dt_ahashent_t **)lhs); 751 dt_ahashent_t *rh = *((dt_ahashent_t **)rhs); 752 dtrace_aggvarid_t lid, rid; 753 754 lid = dt_aggregate_aggvarid(lh); 755 rid = dt_aggregate_aggvarid(rh); 756 757 if (lid < rid) 758 return (DT_LESSTHAN); 759 760 if (lid > rid) 761 return 
(DT_GREATERTHAN); 762 763 return (0); 764 } 765 766 static int 767 dt_aggregate_keycmp(const void *lhs, const void *rhs) 768 { 769 dt_ahashent_t *lh = *((dt_ahashent_t **)lhs); 770 dt_ahashent_t *rh = *((dt_ahashent_t **)rhs); 771 dtrace_aggdesc_t *lagg = lh->dtahe_data.dtada_desc; 772 dtrace_aggdesc_t *ragg = rh->dtahe_data.dtada_desc; 773 dtrace_recdesc_t *lrec, *rrec; 774 char *ldata, *rdata; 775 int rval, i, j, keypos, nrecs; 776 777 if ((rval = dt_aggregate_hashcmp(lhs, rhs)) != 0) 778 return (rval); 779 780 nrecs = lagg->dtagd_nrecs - 1; 781 assert(nrecs == ragg->dtagd_nrecs - 1); 782 783 keypos = dt_keypos + 1 >= nrecs ? 0 : dt_keypos; 784 785 for (i = 1; i < nrecs; i++) { 786 uint64_t lval, rval; 787 int ndx = i + keypos; 788 789 if (ndx >= nrecs) 790 ndx = ndx - nrecs + 1; 791 792 lrec = &lagg->dtagd_rec[ndx]; 793 rrec = &ragg->dtagd_rec[ndx]; 794 795 ldata = lh->dtahe_data.dtada_data + lrec->dtrd_offset; 796 rdata = rh->dtahe_data.dtada_data + rrec->dtrd_offset; 797 798 if (lrec->dtrd_size < rrec->dtrd_size) 799 return (DT_LESSTHAN); 800 801 if (lrec->dtrd_size > rrec->dtrd_size) 802 return (DT_GREATERTHAN); 803 804 switch (lrec->dtrd_size) { 805 case sizeof (uint64_t): 806 /* LINTED - alignment */ 807 lval = *((uint64_t *)ldata); 808 /* LINTED - alignment */ 809 rval = *((uint64_t *)rdata); 810 break; 811 812 case sizeof (uint32_t): 813 /* LINTED - alignment */ 814 lval = *((uint32_t *)ldata); 815 /* LINTED - alignment */ 816 rval = *((uint32_t *)rdata); 817 break; 818 819 case sizeof (uint16_t): 820 /* LINTED - alignment */ 821 lval = *((uint16_t *)ldata); 822 /* LINTED - alignment */ 823 rval = *((uint16_t *)rdata); 824 break; 825 826 case sizeof (uint8_t): 827 lval = *((uint8_t *)ldata); 828 rval = *((uint8_t *)rdata); 829 break; 830 831 default: 832 switch (lrec->dtrd_action) { 833 case DTRACEACT_UMOD: 834 case DTRACEACT_UADDR: 835 case DTRACEACT_USYM: 836 for (j = 0; j < 2; j++) { 837 /* LINTED - alignment */ 838 lval = ((uint64_t *)ldata)[j]; 839 
/* LINTED - alignment */ 840 rval = ((uint64_t *)rdata)[j]; 841 842 if (lval < rval) 843 return (DT_LESSTHAN); 844 845 if (lval > rval) 846 return (DT_GREATERTHAN); 847 } 848 849 break; 850 851 default: 852 for (j = 0; j < lrec->dtrd_size; j++) { 853 lval = ((uint8_t *)ldata)[j]; 854 rval = ((uint8_t *)rdata)[j]; 855 856 if (lval < rval) 857 return (DT_LESSTHAN); 858 859 if (lval > rval) 860 return (DT_GREATERTHAN); 861 } 862 } 863 864 continue; 865 } 866 867 if (lval < rval) 868 return (DT_LESSTHAN); 869 870 if (lval > rval) 871 return (DT_GREATERTHAN); 872 } 873 874 return (0); 875 } 876 877 static int 878 dt_aggregate_valcmp(const void *lhs, const void *rhs) 879 { 880 dt_ahashent_t *lh = *((dt_ahashent_t **)lhs); 881 dt_ahashent_t *rh = *((dt_ahashent_t **)rhs); 882 dtrace_aggdesc_t *lagg = lh->dtahe_data.dtada_desc; 883 dtrace_aggdesc_t *ragg = rh->dtahe_data.dtada_desc; 884 caddr_t ldata = lh->dtahe_data.dtada_data; 885 caddr_t rdata = rh->dtahe_data.dtada_data; 886 dtrace_recdesc_t *lrec, *rrec; 887 int64_t *laddr, *raddr; 888 int rval; 889 890 assert(lagg->dtagd_nrecs == ragg->dtagd_nrecs); 891 892 lrec = &lagg->dtagd_rec[lagg->dtagd_nrecs - 1]; 893 rrec = &ragg->dtagd_rec[ragg->dtagd_nrecs - 1]; 894 895 assert(lrec->dtrd_action == rrec->dtrd_action); 896 897 laddr = (int64_t *)(uintptr_t)(ldata + lrec->dtrd_offset); 898 raddr = (int64_t *)(uintptr_t)(rdata + rrec->dtrd_offset); 899 900 switch (lrec->dtrd_action) { 901 case DTRACEAGG_AVG: 902 rval = dt_aggregate_averagecmp(laddr, raddr); 903 break; 904 905 case DTRACEAGG_STDDEV: 906 rval = dt_aggregate_stddevcmp(laddr, raddr); 907 break; 908 909 case DTRACEAGG_QUANTIZE: 910 rval = dt_aggregate_quantizedcmp(laddr, raddr); 911 break; 912 913 case DTRACEAGG_LQUANTIZE: 914 rval = dt_aggregate_lquantizedcmp(laddr, raddr); 915 break; 916 917 case DTRACEAGG_LLQUANTIZE: 918 rval = dt_aggregate_llquantizedcmp(laddr, raddr); 919 break; 920 921 case DTRACEAGG_COUNT: 922 case DTRACEAGG_SUM: 923 case DTRACEAGG_MIN: 924 
case DTRACEAGG_MAX: 925 rval = dt_aggregate_countcmp(laddr, raddr); 926 break; 927 928 default: 929 assert(0); 930 } 931 932 return (rval); 933 } 934 935 static int 936 dt_aggregate_valkeycmp(const void *lhs, const void *rhs) 937 { 938 int rval; 939 940 if ((rval = dt_aggregate_valcmp(lhs, rhs)) != 0) 941 return (rval); 942 943 /* 944 * If we're here, the values for the two aggregation elements are 945 * equal. We already know that the key layout is the same for the two 946 * elements; we must now compare the keys themselves as a tie-breaker. 947 */ 948 return (dt_aggregate_keycmp(lhs, rhs)); 949 } 950 951 static int 952 dt_aggregate_keyvarcmp(const void *lhs, const void *rhs) 953 { 954 int rval; 955 956 if ((rval = dt_aggregate_keycmp(lhs, rhs)) != 0) 957 return (rval); 958 959 return (dt_aggregate_varcmp(lhs, rhs)); 960 } 961 962 static int 963 dt_aggregate_varkeycmp(const void *lhs, const void *rhs) 964 { 965 int rval; 966 967 if ((rval = dt_aggregate_varcmp(lhs, rhs)) != 0) 968 return (rval); 969 970 return (dt_aggregate_keycmp(lhs, rhs)); 971 } 972 973 static int 974 dt_aggregate_valvarcmp(const void *lhs, const void *rhs) 975 { 976 int rval; 977 978 if ((rval = dt_aggregate_valkeycmp(lhs, rhs)) != 0) 979 return (rval); 980 981 return (dt_aggregate_varcmp(lhs, rhs)); 982 } 983 984 static int 985 dt_aggregate_varvalcmp(const void *lhs, const void *rhs) 986 { 987 int rval; 988 989 if ((rval = dt_aggregate_varcmp(lhs, rhs)) != 0) 990 return (rval); 991 992 return (dt_aggregate_valkeycmp(lhs, rhs)); 993 } 994 995 static int 996 dt_aggregate_keyvarrevcmp(const void *lhs, const void *rhs) 997 { 998 return (dt_aggregate_keyvarcmp(rhs, lhs)); 999 } 1000 1001 static int 1002 dt_aggregate_varkeyrevcmp(const void *lhs, const void *rhs) 1003 { 1004 return (dt_aggregate_varkeycmp(rhs, lhs)); 1005 } 1006 1007 static int 1008 dt_aggregate_valvarrevcmp(const void *lhs, const void *rhs) 1009 { 1010 return (dt_aggregate_valvarcmp(rhs, lhs)); 1011 } 1012 1013 static int 1014 
dt_aggregate_varvalrevcmp(const void *lhs, const void *rhs) 1015 { 1016 return (dt_aggregate_varvalcmp(rhs, lhs)); 1017 } 1018 1019 static int 1020 dt_aggregate_bundlecmp(const void *lhs, const void *rhs) 1021 { 1022 dt_ahashent_t **lh = *((dt_ahashent_t ***)lhs); 1023 dt_ahashent_t **rh = *((dt_ahashent_t ***)rhs); 1024 int i, rval; 1025 1026 if (dt_keysort) { 1027 /* 1028 * If we're sorting on keys, we need to scan until we find the 1029 * last entry -- that's the representative key. (The order of 1030 * the bundle is values followed by key to accommodate the 1031 * default behavior of sorting by value.) If the keys are 1032 * equal, we'll fall into the value comparison loop, below. 1033 */ 1034 for (i = 0; lh[i + 1] != NULL; i++) 1035 continue; 1036 1037 assert(i != 0); 1038 assert(rh[i + 1] == NULL); 1039 1040 if ((rval = dt_aggregate_keycmp(&lh[i], &rh[i])) != 0) 1041 return (rval); 1042 } 1043 1044 for (i = 0; ; i++) { 1045 if (lh[i + 1] == NULL) { 1046 /* 1047 * All of the values are equal; if we're sorting on 1048 * keys, then we're only here because the keys were 1049 * found to be equal and these records are therefore 1050 * equal. If we're not sorting on keys, we'll use the 1051 * key comparison from the representative key as the 1052 * tie-breaker. 
1053 */ 1054 if (dt_keysort) 1055 return (0); 1056 1057 assert(i != 0); 1058 assert(rh[i + 1] == NULL); 1059 return (dt_aggregate_keycmp(&lh[i], &rh[i])); 1060 } else { 1061 if ((rval = dt_aggregate_valcmp(&lh[i], &rh[i])) != 0) 1062 return (rval); 1063 } 1064 } 1065 } 1066 1067 int 1068 dt_aggregate_go(dtrace_hdl_t *dtp) 1069 { 1070 dt_aggregate_t *agp = &dtp->dt_aggregate; 1071 dtrace_optval_t size, cpu; 1072 dtrace_bufdesc_t *buf = &agp->dtat_buf; 1073 int rval, i; 1074 1075 assert(agp->dtat_maxcpu == 0); 1076 assert(agp->dtat_ncpu == 0); 1077 assert(agp->dtat_cpus == NULL); 1078 1079 agp->dtat_maxcpu = dt_sysconf(dtp, _SC_CPUID_MAX) + 1; 1080 agp->dtat_ncpu = dt_sysconf(dtp, _SC_NPROCESSORS_MAX); 1081 agp->dtat_cpus = malloc(agp->dtat_ncpu * sizeof (processorid_t)); 1082 1083 if (agp->dtat_cpus == NULL) 1084 return (dt_set_errno(dtp, EDT_NOMEM)); 1085 1086 /* 1087 * Use the aggregation buffer size as reloaded from the kernel. 1088 */ 1089 size = dtp->dt_options[DTRACEOPT_AGGSIZE]; 1090 1091 rval = dtrace_getopt(dtp, "aggsize", &size); 1092 assert(rval == 0); 1093 1094 if (size == 0 || size == DTRACEOPT_UNSET) 1095 return (0); 1096 1097 buf = &agp->dtat_buf; 1098 buf->dtbd_size = size; 1099 1100 if ((buf->dtbd_data = malloc(buf->dtbd_size)) == NULL) 1101 return (dt_set_errno(dtp, EDT_NOMEM)); 1102 1103 /* 1104 * Now query for the CPUs enabled. 
1105 */ 1106 rval = dtrace_getopt(dtp, "cpu", &cpu); 1107 assert(rval == 0 && cpu != DTRACEOPT_UNSET); 1108 1109 if (cpu != DTRACE_CPUALL) { 1110 assert(cpu < agp->dtat_ncpu); 1111 agp->dtat_cpus[agp->dtat_ncpus++] = (processorid_t)cpu; 1112 1113 return (0); 1114 } 1115 1116 agp->dtat_ncpus = 0; 1117 for (i = 0; i < agp->dtat_maxcpu; i++) { 1118 if (dt_status(dtp, i) == -1) 1119 continue; 1120 1121 agp->dtat_cpus[agp->dtat_ncpus++] = i; 1122 } 1123 1124 return (0); 1125 } 1126 1127 static int 1128 dt_aggwalk_rval(dtrace_hdl_t *dtp, dt_ahashent_t *h, int rval) 1129 { 1130 dt_aggregate_t *agp = &dtp->dt_aggregate; 1131 dtrace_aggdata_t *data; 1132 dtrace_aggdesc_t *aggdesc; 1133 dtrace_recdesc_t *rec; 1134 int i; 1135 1136 switch (rval) { 1137 case DTRACE_AGGWALK_NEXT: 1138 break; 1139 1140 case DTRACE_AGGWALK_CLEAR: { 1141 uint32_t size, offs = 0; 1142 1143 aggdesc = h->dtahe_data.dtada_desc; 1144 rec = &aggdesc->dtagd_rec[aggdesc->dtagd_nrecs - 1]; 1145 size = rec->dtrd_size; 1146 data = &h->dtahe_data; 1147 1148 if (rec->dtrd_action == DTRACEAGG_LQUANTIZE || 1149 rec->dtrd_action == DTRACEAGG_LLQUANTIZE) { 1150 /* 1151 * For lquantize() and llquantize(), we want to be 1152 * sure to not zero the aggregation parameters; step 1153 * over them and adjust our size accordingly. 1154 */ 1155 offs = sizeof (uint64_t); 1156 size -= sizeof (uint64_t); 1157 } 1158 1159 bzero(&data->dtada_data[rec->dtrd_offset] + offs, size); 1160 1161 if (data->dtada_percpu == NULL) 1162 break; 1163 1164 for (i = 0; i < dtp->dt_aggregate.dtat_maxcpu; i++) 1165 bzero(data->dtada_percpu[i] + offs, size); 1166 break; 1167 } 1168 1169 case DTRACE_AGGWALK_ERROR: 1170 /* 1171 * We assume that errno is already set in this case. 
		 */
		return (dt_set_errno(dtp, errno));

	case DTRACE_AGGWALK_ABORT:
		return (dt_set_errno(dtp, EDT_DIRABORT));

	case DTRACE_AGGWALK_DENORMALIZE:
		h->dtahe_data.dtada_normal = 1;
		return (0);

	case DTRACE_AGGWALK_NORMALIZE:
		/*
		 * NORMALIZE is only legal if the callback actually installed
		 * a normalization factor; a zero dtada_normal means it did
		 * not, so restore the identity normal and report the error.
		 */
		if (h->dtahe_data.dtada_normal == 0) {
			h->dtahe_data.dtada_normal = 1;
			return (dt_set_errno(dtp, EDT_BADRVAL));
		}

		return (0);

	case DTRACE_AGGWALK_REMOVE: {
		dtrace_aggdata_t *aggdata = &h->dtahe_data;
		int i, max_cpus = agp->dtat_maxcpu;

		/*
		 * First, remove this hash entry from its hash chain.
		 */
		if (h->dtahe_prev != NULL) {
			h->dtahe_prev->dtahe_next = h->dtahe_next;
		} else {
			/*
			 * No previous element: this entry is the head of its
			 * chain, so the bucket itself must point to our next.
			 */
			dt_ahash_t *hash = &agp->dtat_hash;
			size_t ndx = h->dtahe_hashval % hash->dtah_size;

			assert(hash->dtah_hash[ndx] == h);
			hash->dtah_hash[ndx] = h->dtahe_next;
		}

		if (h->dtahe_next != NULL)
			h->dtahe_next->dtahe_prev = h->dtahe_prev;

		/*
		 * Now remove it from the list of all hash entries.
		 */
		if (h->dtahe_prevall != NULL) {
			h->dtahe_prevall->dtahe_nextall = h->dtahe_nextall;
		} else {
			dt_ahash_t *hash = &agp->dtat_hash;

			assert(hash->dtah_all == h);
			hash->dtah_all = h->dtahe_nextall;
		}

		if (h->dtahe_nextall != NULL)
			h->dtahe_nextall->dtahe_prevall = h->dtahe_prevall;

		/*
		 * We're unlinked.  We can safely destroy the data.
		 */
		if (aggdata->dtada_percpu != NULL) {
			for (i = 0; i < max_cpus; i++)
				free(aggdata->dtada_percpu[i]);
			free(aggdata->dtada_percpu);
		}

		free(aggdata->dtada_data);
		free(h);

		return (0);
	}

	default:
		return (dt_set_errno(dtp, EDT_BADRVAL));
	}

	return (0);
}

/*
 * Sort an array of aggregation entries with qsort(3C), temporarily
 * installing this handle's "aggsortrev", "aggsortkey" and "aggsortkeypos"
 * option values into the global comparison state (dt_revsort, dt_keysort,
 * dt_keypos) and restoring the previous values before returning.  If no
 * comparison function is given, one is chosen based on the "aggsortkey"
 * option.
 *
 * NOTE(review): this function does not itself acquire dt_qsort_lock; the
 * callers visible in this file (e.g. dt_aggregate_walk_sorted) hold the
 * lock around the call -- confirm before calling from a new code path.
 */
void
dt_aggregate_qsort(dtrace_hdl_t *dtp, void *base, size_t nel, size_t width,
    int (*compar)(const void *, const void *))
{
	/* Save the current global sort state so it can be restored below. */
	int rev = dt_revsort, key = dt_keysort, keypos = dt_keypos;
	dtrace_optval_t keyposopt = dtp->dt_options[DTRACEOPT_AGGSORTKEYPOS];

	dt_revsort = (dtp->dt_options[DTRACEOPT_AGGSORTREV] != DTRACEOPT_UNSET);
	dt_keysort = (dtp->dt_options[DTRACEOPT_AGGSORTKEY] != DTRACEOPT_UNSET);

	/*
	 * dt_keypos is an int; only accept the option value if it is set and
	 * fits, otherwise fall back to the first key position.
	 */
	if (keyposopt != DTRACEOPT_UNSET && keyposopt <= INT_MAX) {
		dt_keypos = (int)keyposopt;
	} else {
		dt_keypos = 0;
	}

	if (compar == NULL) {
		if (!dt_keysort) {
			compar = dt_aggregate_varvalcmp;
		} else {
			compar = dt_aggregate_varkeycmp;
		}
	}

	qsort(base, nel, width, compar);

	dt_revsort = rev;
	dt_keysort = key;
	dt_keypos = keypos;
}

/*
 * Walk every aggregation hash entry in insertion order (the "all" list),
 * invoking func on each entry's data.  The callback's return value is
 * interpreted by dt_aggwalk_rval(), which may remove the current entry --
 * hence the next pointer is loaded before the call.  Returns 0 on success
 * or -1 on error (with dt_errno set by dt_aggwalk_rval()).
 */
int
dtrace_aggregate_walk(dtrace_hdl_t *dtp, dtrace_aggregate_f *func, void *arg)
{
	dt_ahashent_t *h, *next;
	dt_ahash_t *hash = &dtp->dt_aggregate.dtat_hash;

	for (h = hash->dtah_all; h != NULL; h = next) {
		/*
		 * dt_aggwalk_rval() can potentially remove the current hash
		 * entry; we need to load the next hash entry before calling
		 * into it.
		 */
		next = h->dtahe_nextall;

		if (dt_aggwalk_rval(dtp, h, func(&h->dtahe_data, arg)) == -1)
			return (-1);
	}

	return (0);
}

/*
 * Compute per-aggregation-variable totals across all hash entries, storing
 * the shared total in each entry's dtada_total and setting the
 * DTRACE_A_TOTAL / DTRACE_A_HASPOSITIVES / DTRACE_A_HASNEGATIVES flags.
 * When clear is B_TRUE, only the clearing pass runs (totals and flags are
 * reset).  Returns 0 on success, -1 on allocation failure.
 */
static int
dt_aggregate_total(dtrace_hdl_t *dtp, boolean_t clear)
{
	dt_ahashent_t *h;
	dtrace_aggdata_t **total;
	dtrace_aggid_t max = DTRACE_AGGVARIDNONE, id;
	dt_aggregate_t *agp = &dtp->dt_aggregate;
	dt_ahash_t *hash = &agp->dtat_hash;
	uint32_t tflags;

	tflags = DTRACE_A_TOTAL | DTRACE_A_HASNEGATIVES | DTRACE_A_HASPOSITIVES;

	/*
	 * If we need to deliver per-aggregation totals, we're going to take
	 * three passes over the aggregate:  one to clear everything out and
	 * determine our maximum aggregation ID, one to actually total
	 * everything up, and a final pass to assign the totals to the
	 * individual elements.
	 */
	for (h = hash->dtah_all; h != NULL; h = h->dtahe_nextall) {
		dtrace_aggdata_t *aggdata = &h->dtahe_data;

		if ((id = dt_aggregate_aggvarid(h)) > max)
			max = id;

		aggdata->dtada_total = 0;
		aggdata->dtada_flags &= ~tflags;
	}

	if (clear || max == DTRACE_AGGVARIDNONE)
		return (0);

	/* One slot per aggregation variable ID, indexed directly by ID. */
	total = dt_zalloc(dtp, (max + 1) * sizeof (dtrace_aggdata_t *));

	if (total == NULL)
		return (-1);

	for (h = hash->dtah_all; h != NULL; h = h->dtahe_nextall) {
		dtrace_aggdata_t *aggdata = &h->dtahe_data;
		dtrace_aggdesc_t *agg = aggdata->dtada_desc;
		dtrace_recdesc_t *rec;
		caddr_t data;
		int64_t val, *addr;

		/* The aggregating action is always the final record. */
		rec = &agg->dtagd_rec[agg->dtagd_nrecs - 1];
		data = aggdata->dtada_data;
		addr = (int64_t *)(uintptr_t)(data + rec->dtrd_offset);

		switch (rec->dtrd_action) {
		case DTRACEAGG_STDDEV:
			val = dt_stddev((uint64_t *)addr, 1);
			break;

		case DTRACEAGG_SUM:
		case DTRACEAGG_COUNT:
			val = *addr;
			break;

		case DTRACEAGG_AVG:
			/* addr[0] is the count, addr[1] the running sum. */
			val = addr[0] ?
			    (addr[1] / addr[0]) : 0;
			break;

		default:
			/* Other aggregating actions do not contribute. */
			continue;
		}

		/*
		 * The first entry seen for a variable becomes the canonical
		 * holder of that variable's running total; later entries
		 * accumulate into it.
		 */
		if (total[agg->dtagd_varid] == NULL) {
			total[agg->dtagd_varid] = aggdata;
			aggdata->dtada_flags |= DTRACE_A_TOTAL;
		} else {
			aggdata = total[agg->dtagd_varid];
		}

		if (val > 0)
			aggdata->dtada_flags |= DTRACE_A_HASPOSITIVES;

		if (val < 0) {
			aggdata->dtada_flags |= DTRACE_A_HASNEGATIVES;
			val = -val;
		}

		if (dtp->dt_options[DTRACEOPT_AGGZOOM] != DTRACEOPT_UNSET) {
			/*
			 * With aggzoom set, the "total" is the scaled maximum
			 * single value rather than the sum of all values.
			 */
			val = (int64_t)((long double)val *
			    (1 / DTRACE_AGGZOOM_MAX));

			if (val > aggdata->dtada_total)
				aggdata->dtada_total = val;
		} else {
			aggdata->dtada_total += val;
		}
	}

	/*
	 * And now one final pass to set everyone's total.
	 */
	for (h = hash->dtah_all; h != NULL; h = h->dtahe_nextall) {
		dtrace_aggdata_t *aggdata = &h->dtahe_data, *t;
		dtrace_aggdesc_t *agg = aggdata->dtada_desc;

		if ((t = total[agg->dtagd_varid]) == NULL || aggdata == t)
			continue;

		aggdata->dtada_total = t->dtada_total;
		aggdata->dtada_flags |= (t->dtada_flags & tflags);
	}

	dt_free(dtp, total);

	return (0);
}

/*
 * Compute, for every aggregation variable, the minimum and maximum
 * populated bin across all of that variable's entries (quantize() and
 * lquantize() actions only), storing the shared result in each entry's
 * dtada_minbin/dtada_maxbin and setting DTRACE_A_MINMAXBIN.  Used when the
 * "aggpack" option is set (see dt_aggregate_walk_sorted()).  When clear is
 * B_TRUE, only the reset pass runs.  Returns 0 on success, -1 on
 * allocation failure.
 */
static int
dt_aggregate_minmaxbin(dtrace_hdl_t *dtp, boolean_t clear)
{
	dt_ahashent_t *h;
	dtrace_aggdata_t **minmax;
	dtrace_aggid_t max = DTRACE_AGGVARIDNONE, id;
	dt_aggregate_t *agp = &dtp->dt_aggregate;
	dt_ahash_t *hash = &agp->dtat_hash;

	/* First pass: reset all bins/flags and find the maximum var ID. */
	for (h = hash->dtah_all; h != NULL; h = h->dtahe_nextall) {
		dtrace_aggdata_t *aggdata = &h->dtahe_data;

		if ((id = dt_aggregate_aggvarid(h)) > max)
			max = id;

		aggdata->dtada_minbin = 0;
		aggdata->dtada_maxbin = 0;
		aggdata->dtada_flags &= ~DTRACE_A_MINMAXBIN;
	}

	if (clear || max == DTRACE_AGGVARIDNONE)
		return (0);

	minmax = dt_zalloc(dtp, (max + 1) * sizeof (dtrace_aggdata_t *));

	if (minmax == NULL)
		return (-1);

	for (h = hash->dtah_all; h != NULL; h = h->dtahe_nextall) {
		dtrace_aggdata_t *aggdata = &h->dtahe_data;
		dtrace_aggdesc_t *agg = aggdata->dtada_desc;
		dtrace_recdesc_t *rec;
		caddr_t data;
		int64_t *addr;
		int minbin = -1, maxbin = -1, i;
		int start = 0, size;

		rec = &agg->dtagd_rec[agg->dtagd_nrecs - 1];
		size = rec->dtrd_size / sizeof (int64_t);
		data = aggdata->dtada_data;
		addr = (int64_t *)(uintptr_t)(data + rec->dtrd_offset);

		switch (rec->dtrd_action) {
		case DTRACEAGG_LQUANTIZE:
			/*
			 * For lquantize(), we always display the entire range
			 * of the aggregation when aggpack is set.
			 */
			start = 1;
			minbin = start;
			maxbin = size - 1 - start;
			break;

		case DTRACEAGG_QUANTIZE:
			/* Scan for the lowest and highest non-empty bins. */
			for (i = start; i < size; i++) {
				if (!addr[i])
					continue;

				if (minbin == -1)
					minbin = i - start;

				maxbin = i - start;
			}

			if (minbin == -1) {
				/*
				 * If we have no data (e.g., due to a clear()
				 * or negative increments), we'll use the
				 * zero bucket as both our min and max.
				 */
				minbin = maxbin = DTRACE_QUANTIZE_ZEROBUCKET;
			}

			break;

		default:
			continue;
		}

		if (minmax[agg->dtagd_varid] == NULL) {
			minmax[agg->dtagd_varid] = aggdata;
			aggdata->dtada_flags |= DTRACE_A_MINMAXBIN;
			aggdata->dtada_minbin = minbin;
			aggdata->dtada_maxbin = maxbin;
			continue;
		}

		/* Widen the canonical entry's range to cover this entry. */
		if (minbin < minmax[agg->dtagd_varid]->dtada_minbin)
			minmax[agg->dtagd_varid]->dtada_minbin = minbin;

		if (maxbin > minmax[agg->dtagd_varid]->dtada_maxbin)
			minmax[agg->dtagd_varid]->dtada_maxbin = maxbin;
	}

	/*
	 * And now one final pass to set everyone's minbin and maxbin.
	 */
	for (h = hash->dtah_all; h != NULL; h = h->dtahe_nextall) {
		dtrace_aggdata_t *aggdata = &h->dtahe_data, *mm;
		dtrace_aggdesc_t *agg = aggdata->dtada_desc;

		if ((mm = minmax[agg->dtagd_varid]) == NULL || aggdata == mm)
			continue;

		aggdata->dtada_minbin = mm->dtada_minbin;
		aggdata->dtada_maxbin = mm->dtada_maxbin;
		aggdata->dtada_flags |= DTRACE_A_MINMAXBIN;
	}

	dt_free(dtp, minmax);

	return (0);
}

/*
 * Walk all aggregation entries in sorted order, invoking func on each.
 * If sfunc is non-NULL it is used as the comparison function directly;
 * otherwise dt_aggregate_qsort() selects one based on the handle's sort
 * options.  If the "agghist" or "aggpack" options are set, the per-variable
 * totals or min/max bins are computed before the walk and cleared after.
 * Returns 0 on success, -1 on error.
 */
static int
dt_aggregate_walk_sorted(dtrace_hdl_t *dtp,
    dtrace_aggregate_f *func, void *arg,
    int (*sfunc)(const void *, const void *))
{
	dt_aggregate_t *agp = &dtp->dt_aggregate;
	dt_ahashent_t *h, **sorted;
	dt_ahash_t *hash = &agp->dtat_hash;
	size_t i, nentries = 0;
	int rval = -1;

	agp->dtat_flags &= ~(DTRACE_A_TOTAL | DTRACE_A_MINMAXBIN);

	if (dtp->dt_options[DTRACEOPT_AGGHIST] != DTRACEOPT_UNSET) {
		agp->dtat_flags |= DTRACE_A_TOTAL;

		if (dt_aggregate_total(dtp, B_FALSE) != 0)
			return (-1);
	}

	if (dtp->dt_options[DTRACEOPT_AGGPACK] != DTRACEOPT_UNSET) {
		agp->dtat_flags |= DTRACE_A_MINMAXBIN;

		if (dt_aggregate_minmaxbin(dtp, B_FALSE) != 0)
			return (-1);
	}

	for (h = hash->dtah_all; h != NULL; h = h->dtahe_nextall)
		nentries++;

	sorted = dt_alloc(dtp, nentries * sizeof (dt_ahashent_t *));

	if (sorted == NULL)
		goto out;

	for (h = hash->dtah_all, i = 0; h != NULL; h = h->dtahe_nextall)
		sorted[i++] = h;

	/*
	 * The comparison functions read the global dt_revsort/dt_keysort/
	 * dt_keypos state, so the sort must run under dt_qsort_lock.
	 */
	(void) pthread_mutex_lock(&dt_qsort_lock);

	if (sfunc == NULL) {
		dt_aggregate_qsort(dtp, sorted, nentries,
		    sizeof (dt_ahashent_t *), NULL);
	} else {
		/*
		 * If we've been explicitly passed a sorting function,
		 * we'll use that -- ignoring the values of the "aggsortrev",
		 * "aggsortkey" and "aggsortkeypos" options.
		 */
		qsort(sorted, nentries, sizeof (dt_ahashent_t *), sfunc);
	}

	(void) pthread_mutex_unlock(&dt_qsort_lock);

	for (i = 0; i < nentries; i++) {
		h = sorted[i];

		if (dt_aggwalk_rval(dtp, h, func(&h->dtahe_data, arg)) == -1)
			goto out;
	}

	rval = 0;
out:
	/* Undo any totals/minmax state computed above, even on error. */
	if (agp->dtat_flags & DTRACE_A_TOTAL)
		(void) dt_aggregate_total(dtp, B_TRUE);

	if (agp->dtat_flags & DTRACE_A_MINMAXBIN)
		(void) dt_aggregate_minmaxbin(dtp, B_TRUE);

	dt_free(dtp, sorted);
	return (rval);
}

/*
 * The following are the public sorted-walk variants; each is a thin
 * wrapper around dt_aggregate_walk_sorted() with a fixed comparison
 * function (or NULL to honor the handle's sort options).
 */
int
dtrace_aggregate_walk_sorted(dtrace_hdl_t *dtp,
    dtrace_aggregate_f *func, void *arg)
{
	return (dt_aggregate_walk_sorted(dtp, func, arg, NULL));
}

int
dtrace_aggregate_walk_keysorted(dtrace_hdl_t *dtp,
    dtrace_aggregate_f *func, void *arg)
{
	return (dt_aggregate_walk_sorted(dtp, func,
	    arg, dt_aggregate_varkeycmp));
}

int
dtrace_aggregate_walk_valsorted(dtrace_hdl_t *dtp,
    dtrace_aggregate_f *func, void *arg)
{
	return (dt_aggregate_walk_sorted(dtp, func,
	    arg, dt_aggregate_varvalcmp));
}

int
dtrace_aggregate_walk_keyvarsorted(dtrace_hdl_t *dtp,
    dtrace_aggregate_f *func, void *arg)
{
	return (dt_aggregate_walk_sorted(dtp, func,
	    arg, dt_aggregate_keyvarcmp));
}

int
dtrace_aggregate_walk_valvarsorted(dtrace_hdl_t *dtp,
    dtrace_aggregate_f *func, void *arg)
{
	return (dt_aggregate_walk_sorted(dtp, func,
	    arg, dt_aggregate_valvarcmp));
}

int
dtrace_aggregate_walk_keyrevsorted(dtrace_hdl_t *dtp,
    dtrace_aggregate_f *func, void *arg)
{
	return (dt_aggregate_walk_sorted(dtp, func,
	    arg, dt_aggregate_varkeyrevcmp));
}

int
dtrace_aggregate_walk_valrevsorted(dtrace_hdl_t *dtp,
    dtrace_aggregate_f *func, void *arg)
{
	return (dt_aggregate_walk_sorted(dtp, func,
	    arg, dt_aggregate_varvalrevcmp));
}

/* Reverse-sorted walk variants (see the forward variants above). */
int
dtrace_aggregate_walk_keyvarrevsorted(dtrace_hdl_t *dtp,
    dtrace_aggregate_f *func, void *arg)
{
	return (dt_aggregate_walk_sorted(dtp, func,
	    arg, dt_aggregate_keyvarrevcmp));
}

int
dtrace_aggregate_walk_valvarrevsorted(dtrace_hdl_t *dtp,
    dtrace_aggregate_f *func, void *arg)
{
	return (dt_aggregate_walk_sorted(dtp, func,
	    arg, dt_aggregate_valvarrevcmp));
}

/*
 * Walk several aggregations "joined" by tuple:  entries from the naggvars
 * aggregation variables in aggvars are grouped into bundles that share the
 * same key, zero-filled data is fabricated for variables with no value for
 * a given key, the bundles are sorted by the value of the aggregation at
 * the "aggsortpos" position, and func is invoked once per bundle with an
 * array of naggvars + 1 aggdata pointers (slot 0 is the representative
 * key).  Returns 0 on success, -1 on error.
 */
int
dtrace_aggregate_walk_joined(dtrace_hdl_t *dtp, dtrace_aggvarid_t *aggvars,
    int naggvars, dtrace_aggregate_walk_joined_f *func, void *arg)
{
	dt_aggregate_t *agp = &dtp->dt_aggregate;
	dt_ahashent_t *h, **sorted = NULL, ***bundle, **nbundle;
	const dtrace_aggdata_t **data;
	dt_ahashent_t *zaggdata = NULL;
	dt_ahash_t *hash = &agp->dtat_hash;
	size_t nentries = 0, nbundles = 0, start, zsize = 0, bundlesize;
	dtrace_aggvarid_t max = 0, aggvar;
	int rval = -1, *map, *remap = NULL;
	int i, j;
	dtrace_optval_t sortpos = dtp->dt_options[DTRACEOPT_AGGSORTPOS];

	/*
	 * If the sorting position is greater than the number of aggregation
	 * variable IDs, we silently set it to 0.
	 */
	if (sortpos == DTRACEOPT_UNSET || sortpos >= naggvars)
		sortpos = 0;

	/*
	 * First we need to translate the specified aggregation variable IDs
	 * into a linear map that will allow us to translate an aggregation
	 * variable ID into its position in the specified aggvars.
	 */
	for (i = 0; i < naggvars; i++) {
		/*
		 * NOTE(review): dtrace_aggvarid_t appears to be unsigned, in
		 * which case the "< 0" test is vacuous -- confirm against the
		 * typedef; the DTRACE_AGGVARIDNONE check is the live one.
		 */
		if (aggvars[i] == DTRACE_AGGVARIDNONE || aggvars[i] < 0)
			return (dt_set_errno(dtp, EDT_BADAGGVAR));

		if (aggvars[i] > max)
			max = aggvars[i];
	}

	if ((map = dt_zalloc(dtp, (max + 1) * sizeof (int))) == NULL)
		return (-1);

	zaggdata = dt_zalloc(dtp, naggvars * sizeof (dt_ahashent_t));

	if (zaggdata == NULL)
		goto out;

	for (i = 0; i < naggvars; i++) {
		/* Rotate positions by sortpos so slot 0 is the sort key. */
		int ndx = i + sortpos;

		if (ndx >= naggvars)
			ndx -= naggvars;

		aggvar = aggvars[ndx];
		assert(aggvar <= max);

		if (map[aggvar]) {
			/*
			 * We have an aggregation variable that is present
			 * more than once in the array of aggregation
			 * variables.  While it's unclear why one might want
			 * to do this, it's legal.  To support this construct,
			 * we will allocate a remap that will indicate the
			 * position from which this aggregation variable
			 * should be pulled.  (That is, where the remap will
			 * map from one position to another.)
			 */
			if (remap == NULL) {
				remap = dt_zalloc(dtp, naggvars * sizeof (int));

				if (remap == NULL)
					goto out;
			}

			/*
			 * Given that the variable is already present, assert
			 * that following through the mapping and adjusting
			 * for the sort position yields the same aggregation
			 * variable ID.
			 */
			assert(aggvars[(map[aggvar] - 1 + sortpos) %
			    naggvars] == aggvars[ndx]);

			remap[i] = map[aggvar];
			continue;
		}

		/* map values are position + 1; zero means "not present". */
		map[aggvar] = i + 1;
	}

	/*
	 * We need to take two passes over the data to size our allocation, so
	 * we'll use the first pass to also fill in the zero-filled data to be
	 * used to properly format a zero-valued aggregation.
	 */
	for (h = hash->dtah_all; h != NULL; h = h->dtahe_nextall) {
		dtrace_aggvarid_t id;
		int ndx;

		if ((id = dt_aggregate_aggvarid(h)) > max || !(ndx = map[id]))
			continue;

		if (zaggdata[ndx - 1].dtahe_size == 0) {
			zaggdata[ndx - 1].dtahe_size = h->dtahe_size;
			zaggdata[ndx - 1].dtahe_data = h->dtahe_data;
		}

		nentries++;
	}

	if (nentries == 0) {
		/*
		 * We couldn't find any entries; there is nothing else to do.
		 */
		rval = 0;
		goto out;
	}

	/*
	 * Before we sort the data, we're going to look for any holes in our
	 * zero-filled data.  This will occur if an aggregation variable that
	 * we are being asked to print has not yet been assigned the result of
	 * any aggregating action for _any_ tuple.  The issue becomes that we
	 * would like a zero value to be printed for all columns for this
	 * aggregation, but without any record description, we don't know the
	 * aggregating action that corresponds to the aggregation variable.  To
	 * try to find a match, we're simply going to lookup aggregation IDs
	 * (which are guaranteed to be contiguous and to start from 1), looking
	 * for the specified aggregation variable ID.  If we find a match,
	 * we'll use that.  If we iterate over all aggregation IDs and don't
	 * find a match, then we must be an anonymous enabling.  (Anonymous
	 * enablings can't currently derive either aggregation variable IDs or
	 * aggregation variable names given only an aggregation ID.)  In this
	 * obscure case (anonymous enabling, multiple aggregation printa() with
	 * some aggregations not represented for any tuple), our defined
	 * behavior is that the zero will be printed in the format of the first
	 * aggregation variable that contains any non-zero value.
	 */
	for (i = 0; i < naggvars; i++) {
		if (zaggdata[i].dtahe_size == 0) {
			dtrace_aggvarid_t aggvar;

			aggvar = aggvars[(i - sortpos + naggvars) % naggvars];
			assert(zaggdata[i].dtahe_data.dtada_data == NULL);

			for (j = DTRACE_AGGIDNONE + 1; ; j++) {
				dtrace_aggdesc_t *agg;
				dtrace_aggdata_t *aggdata;

				if (dt_aggid_lookup(dtp, j, &agg) != 0)
					break;

				if (agg->dtagd_varid != aggvar)
					continue;

				/*
				 * We have our description -- now we need to
				 * cons up the zaggdata entry for it.
				 */
				aggdata = &zaggdata[i].dtahe_data;
				aggdata->dtada_size = agg->dtagd_size;
				aggdata->dtada_desc = agg;
				aggdata->dtada_handle = dtp;
				(void) dt_epid_lookup(dtp, agg->dtagd_epid,
				    &aggdata->dtada_edesc,
				    &aggdata->dtada_pdesc);
				aggdata->dtada_normal = 1;
				zaggdata[i].dtahe_hashval = 0;
				zaggdata[i].dtahe_size = agg->dtagd_size;
				break;
			}

			if (zaggdata[i].dtahe_size == 0) {
				caddr_t data;

				/*
				 * We couldn't find this aggregation, meaning
				 * that we have never seen it before for any
				 * tuple _and_ this is an anonymous enabling.
				 * That is, we're in the obscure case outlined
				 * above.  In this case, our defined behavior
				 * is to format the data in the format of the
				 * first non-zero aggregation -- of which, of
				 * course, we know there to be at least one
				 * (or nentries would have been zero).
				 */
				for (j = 0; j < naggvars; j++) {
					if (zaggdata[j].dtahe_size != 0)
						break;
				}

				assert(j < naggvars);
				zaggdata[i] = zaggdata[j];

				data = zaggdata[i].dtahe_data.dtada_data;
				assert(data != NULL);
			}
		}
	}

	/*
	 * Now we need to allocate our zero-filled data for use for
	 * aggregations that don't have a value corresponding to a given key.
	 */
	for (i = 0; i < naggvars; i++) {
		dtrace_aggdata_t *aggdata = &zaggdata[i].dtahe_data;
		dtrace_aggdesc_t *aggdesc = aggdata->dtada_desc;
		dtrace_recdesc_t *rec;
		uint64_t larg;
		caddr_t zdata;

		zsize = zaggdata[i].dtahe_size;
		assert(zsize != 0);

		if ((zdata = dt_zalloc(dtp, zsize)) == NULL) {
			/*
			 * If we failed to allocated some zero-filled data, we
			 * need to zero out the remaining dtada_data pointers
			 * to prevent the wrong data from being freed below.
			 */
			for (j = i; j < naggvars; j++)
				zaggdata[j].dtahe_data.dtada_data = NULL;
			goto out;
		}

		aggvar = aggvars[(i - sortpos + naggvars) % naggvars];

		/*
		 * First, the easy bit.  To maintain compatibility with
		 * consumers that pull the compiler-generated ID out of the
		 * data, we put that ID at the top of the zero-filled data.
		 */
		rec = &aggdesc->dtagd_rec[0];
		/* LINTED - alignment */
		*((dtrace_aggvarid_t *)(zdata + rec->dtrd_offset)) = aggvar;

		rec = &aggdesc->dtagd_rec[aggdesc->dtagd_nrecs - 1];

		/*
		 * Now for the more complicated part.  For the lquantize() and
		 * llquantize() aggregating actions, zero-filled data is not
		 * equivalent to an empty record:  we must also get the
		 * parameters for the lquantize()/llquantize().
		 */
		if (rec->dtrd_action == DTRACEAGG_LQUANTIZE ||
		    rec->dtrd_action == DTRACEAGG_LLQUANTIZE) {
			if (aggdata->dtada_data != NULL) {
				/*
				 * The easier case here is if we actually have
				 * some prototype data -- in which case we
				 * manually dig it out of the aggregation
				 * record.
				 */
				/* LINTED - alignment */
				larg = *((uint64_t *)(aggdata->dtada_data +
				    rec->dtrd_offset));
			} else {
				/*
				 * We don't have any prototype data.  As a
				 * result, we know that we _do_ have the
				 * compiler-generated information.  (If this
				 * were an anonymous enabling, all of our
				 * zero-filled data would have prototype data
				 * -- either directly or indirectly.) So as
				 * gross as it is, we'll grovel around in the
				 * compiler-generated information to find the
				 * lquantize()/llquantize() parameters.
				 */
				dtrace_stmtdesc_t *sdp;
				dt_ident_t *aid;
				dt_idsig_t *isp;

				sdp = (dtrace_stmtdesc_t *)(uintptr_t)
				    aggdesc->dtagd_rec[0].dtrd_uarg;
				aid = sdp->dtsd_aggdata;
				isp = (dt_idsig_t *)aid->di_data;
				assert(isp->dis_auxinfo != 0);
				larg = isp->dis_auxinfo;
			}

			/* LINTED - alignment */
			*((uint64_t *)(zdata + rec->dtrd_offset)) = larg;
		}

		aggdata->dtada_data = zdata;
	}

	/*
	 * Now that we've dealt with setting up our zero-filled data, we can
	 * allocate our sorted array, and take another pass over the data to
	 * fill it.
	 */
	sorted = dt_alloc(dtp, nentries * sizeof (dt_ahashent_t *));

	if (sorted == NULL)
		goto out;

	for (h = hash->dtah_all, i = 0; h != NULL; h = h->dtahe_nextall) {
		dtrace_aggvarid_t id;

		if ((id = dt_aggregate_aggvarid(h)) > max || !map[id])
			continue;

		sorted[i++] = h;
	}

	assert(i == nentries);

	/*
	 * We've loaded our array; now we need to sort by value to allow us
	 * to create bundles of like value.  We're going to acquire the
	 * dt_qsort_lock here, and hold it across all of our subsequent
	 * comparison and sorting.
	 */
	(void) pthread_mutex_lock(&dt_qsort_lock);

	qsort(sorted, nentries, sizeof (dt_ahashent_t *),
	    dt_aggregate_keyvarcmp);

	/*
	 * Now we need to go through and create bundles.  Because the number
	 * of bundles is bounded by the size of the sorted array, we're going
	 * to reuse the underlying storage.  And note that "bundle" is an
	 * array of pointers to arrays of pointers to dt_ahashent_t -- making
	 * its type (regrettably) "dt_ahashent_t ***".  (Regrettable because
	 * '*' -- like '_' and 'X' -- should never appear in triplicate in
	 * an ideal world.)
	 */
	bundle = (dt_ahashent_t ***)sorted;

	for (i = 1, start = 0; i <= nentries; i++) {
		if (i < nentries &&
		    dt_aggregate_keycmp(&sorted[i], &sorted[i - 1]) == 0)
			continue;

		/*
		 * We have a bundle boundary.  Everything from start to
		 * (i - 1) belongs in one bundle.
		 */
		assert(i - start <= naggvars);
		bundlesize = (naggvars + 2) * sizeof (dt_ahashent_t *);

		if ((nbundle = dt_zalloc(dtp, bundlesize)) == NULL) {
			(void) pthread_mutex_unlock(&dt_qsort_lock);
			goto out;
		}

		for (j = start; j < i; j++) {
			dtrace_aggvarid_t id = dt_aggregate_aggvarid(sorted[j]);

			assert(id <= max);
			assert(map[id] != 0);
			assert(map[id] - 1 < naggvars);
			assert(nbundle[map[id] - 1] == NULL);
			nbundle[map[id] - 1] = sorted[j];

			/* Slot naggvars holds the representative key entry. */
			if (nbundle[naggvars] == NULL)
				nbundle[naggvars] = sorted[j];
		}

		for (j = 0; j < naggvars; j++) {
			if (nbundle[j] != NULL)
				continue;

			/*
			 * Before we assume that this aggregation variable
			 * isn't present (and fall back to using the
			 * zero-filled data allocated earlier), check the
			 * remap.  If we have a remapping, we'll drop it in
			 * here.  Note that we might be remapping an
			 * aggregation variable that isn't present for this
			 * key; in this case, the aggregation data that we
			 * copy will point to the zeroed data.
			 */
			if (remap != NULL && remap[j]) {
				assert(remap[j] - 1 < j);
				assert(nbundle[remap[j] - 1] != NULL);
				nbundle[j] = nbundle[remap[j] - 1];
			} else {
				nbundle[j] = &zaggdata[j];
			}
		}

		bundle[nbundles++] = nbundle;
		start = i;
	}

	/*
	 * Now we need to re-sort based on the first value.
	 */
	dt_aggregate_qsort(dtp, bundle, nbundles, sizeof (dt_ahashent_t **),
	    dt_aggregate_bundlecmp);

	(void) pthread_mutex_unlock(&dt_qsort_lock);

	/*
	 * We're done!  Now we just need to go back over the sorted bundles,
	 * calling the function.
	 */
	data = alloca((naggvars + 1) * sizeof (dtrace_aggdata_t *));

	for (i = 0; i < nbundles; i++) {
		for (j = 0; j < naggvars; j++)
			data[j + 1] = NULL;

		for (j = 0; j < naggvars; j++) {
			/* Un-rotate by sortpos to restore caller order. */
			int ndx = j - sortpos;

			if (ndx < 0)
				ndx += naggvars;

			assert(bundle[i][ndx] != NULL);
			data[j + 1] = &bundle[i][ndx]->dtahe_data;
		}

		for (j = 0; j < naggvars; j++)
			assert(data[j + 1] != NULL);

		/*
		 * The representative key is the last element in the bundle.
		 * Assert that we have one, and then set it to be the first
		 * element of data.
		 */
		assert(bundle[i][j] != NULL);
		data[0] = &bundle[i][j]->dtahe_data;

		if ((rval = func(data, naggvars + 1, arg)) == -1)
			goto out;
	}

	rval = 0;
out:
	/*
	 * Note that "bundle" aliases "sorted" (the storage was reused), so
	 * the per-bundle frees below are distinct from freeing sorted itself.
	 */
	for (i = 0; i < nbundles; i++)
		dt_free(dtp, bundle[i]);

	if (zaggdata != NULL) {
		for (i = 0; i < naggvars; i++)
			dt_free(dtp, zaggdata[i].dtahe_data.dtada_data);
	}

	dt_free(dtp, zaggdata);
	dt_free(dtp, sorted);
	dt_free(dtp, remap);
	dt_free(dtp, map);

	return (rval);
}

/*
 * Print all aggregations via the given walker (default: the sorted walk),
 * using dt_print_agg() as the per-entry callback.  Returns 0 on success or
 * -1 with dt_errno set on failure.
 */
int
dtrace_aggregate_print(dtrace_hdl_t *dtp, FILE *fp,
    dtrace_aggregate_walk_f *func)
{
	dt_print_aggdata_t pd;

	bzero(&pd, sizeof (pd));

	pd.dtpa_dtp = dtp;
	pd.dtpa_fp = fp;
	pd.dtpa_allunprint = 1;

	if (func == NULL)
		func = dtrace_aggregate_walk_sorted;

	if ((*func)(dtp, dt_print_agg, &pd) == -1)
		return (dt_set_errno(dtp, dtp->dt_errno));

	return (0);
}

/*
 * Zero the aggregated value portion of every hash entry (the final record
 * of each entry's data), including any per-CPU copies.  Keys and entry
 * structures are left intact.
 */
void
dtrace_aggregate_clear(dtrace_hdl_t *dtp)
{
	dt_aggregate_t *agp = &dtp->dt_aggregate;
	dt_ahash_t *hash = &agp->dtat_hash;
	dt_ahashent_t *h;
	dtrace_aggdata_t *data;
	dtrace_aggdesc_t *aggdesc;
	dtrace_recdesc_t *rec;
	int i, max_cpus = agp->dtat_maxcpu;

	for (h = hash->dtah_all; h != NULL; h = h->dtahe_nextall) {
		aggdesc = h->dtahe_data.dtada_desc;
		rec = &aggdesc->dtagd_rec[aggdesc->dtagd_nrecs - 1];
		data = &h->dtahe_data;

		bzero(&data->dtada_data[rec->dtrd_offset], rec->dtrd_size);

		if (data->dtada_percpu == NULL)
			continue;

		for (i = 0; i < max_cpus; i++)
			bzero(data->dtada_percpu[i], rec->dtrd_size);
	}
}

/*
 * Tear down the handle's aggregate state:  free every hash entry (its data,
 * any per-CPU buffers, and the entry itself), the hash bucket array, the
 * aggregation snapshot buffer, and the CPU list.
 */
void
dt_aggregate_destroy(dtrace_hdl_t *dtp)
{
	dt_aggregate_t *agp = &dtp->dt_aggregate;
	dt_ahash_t *hash = &agp->dtat_hash;
	dt_ahashent_t *h, *next;
	dtrace_aggdata_t *aggdata;
	int i, max_cpus = agp->dtat_maxcpu;

	if (hash->dtah_hash == NULL) {
		/* Never initialized: there can be no entries either. */
		assert(hash->dtah_all == NULL);
	} else {
		free(hash->dtah_hash);

		for (h = hash->dtah_all; h != NULL; h = next) {
			/* Load the next pointer before freeing the entry. */
			next = h->dtahe_nextall;

			aggdata = &h->dtahe_data;

			if (aggdata->dtada_percpu != NULL) {
				for (i = 0; i < max_cpus; i++)
					free(aggdata->dtada_percpu[i]);
				free(aggdata->dtada_percpu);
			}

			free(aggdata->dtada_data);
			free(h);
		}

		hash->dtah_hash = NULL;
		hash->dtah_all = NULL;
		hash->dtah_size = 0;
	}

	free(agp->dtat_buf.dtbd_data);
	free(agp->dtat_cpus);
}