xref: /freebsd/cddl/contrib/opensolaris/lib/libdtrace/common/dt_aggregate.c (revision 1bb8b1d7e190d75597d31ab87364a058e8ef8c5b)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 /*
28  * Copyright (c) 2013, Joyent, Inc. All rights reserved.
29  * Copyright (c) 2012 by Delphix. All rights reserved.
30  */
31 
32 #include <stdlib.h>
33 #include <strings.h>
34 #include <errno.h>
35 #include <unistd.h>
36 #include <dt_impl.h>
37 #include <assert.h>
38 #include <dt_oformat.h>
39 #ifdef illumos
40 #include <alloca.h>
41 #else
42 #include <sys/sysctl.h>
43 #include <libproc_compat.h>
44 #endif
45 #include <limits.h>
46 
#define	DTRACE_AHASHSIZE	32779		/* big 'ol prime */

/*
 * Because qsort(3C) does not allow an argument to be passed to a comparison
 * function, the variables that affect comparison must regrettably be global;
 * they are protected by a global static lock, dt_qsort_lock.
 */
static pthread_mutex_t dt_qsort_lock = PTHREAD_MUTEX_INITIALIZER;

static int dt_revsort;		/* non-zero: reverse the sort direction */
static int dt_keysort;		/* non-zero: sort on keys rather than values */
static int dt_keypos;		/* index of the key record to compare first */

/* Comparator results, inverted when dt_revsort requests a descending sort. */
#define	DT_LESSTHAN	(dt_revsort == 0 ? -1 : 1)
#define	DT_GREATERTHAN	(dt_revsort == 0 ? 1 : -1)
62 
/*
 * Aggregating action for count()/sum()/avg()/stddev()/quantize(): fold the
 * new data into the existing data by summing corresponding 64-bit words.
 */
static void
dt_aggregate_count(int64_t *existing, int64_t *new, size_t size)
{
	size_t i, nwords = size / sizeof (int64_t);

	for (i = 0; i < nwords; i++)
		existing[i] += new[i];
}
71 
72 static int
dt_aggregate_countcmp(int64_t * lhs,int64_t * rhs)73 dt_aggregate_countcmp(int64_t *lhs, int64_t *rhs)
74 {
75 	int64_t lvar = *lhs;
76 	int64_t rvar = *rhs;
77 
78 	if (lvar < rvar)
79 		return (DT_LESSTHAN);
80 
81 	if (lvar > rvar)
82 		return (DT_GREATERTHAN);
83 
84 	return (0);
85 }
86 
/*ARGSUSED*/
/*
 * Aggregating action for min(): retain the smaller of the two single
 * 64-bit values.  The size argument is unused.
 */
static void
dt_aggregate_min(int64_t *existing, int64_t *new, size_t size)
{
	if (*existing > *new)
		*existing = *new;
}
94 
/*ARGSUSED*/
/*
 * Aggregating action for max(): retain the larger of the two single
 * 64-bit values.  The size argument is unused.
 */
static void
dt_aggregate_max(int64_t *existing, int64_t *new, size_t size)
{
	if (*existing < *new)
		*existing = *new;
}
102 
103 static int
dt_aggregate_averagecmp(int64_t * lhs,int64_t * rhs)104 dt_aggregate_averagecmp(int64_t *lhs, int64_t *rhs)
105 {
106 	int64_t lavg = lhs[0] ? (lhs[1] / lhs[0]) : 0;
107 	int64_t ravg = rhs[0] ? (rhs[1] / rhs[0]) : 0;
108 
109 	if (lavg < ravg)
110 		return (DT_LESSTHAN);
111 
112 	if (lavg > ravg)
113 		return (DT_GREATERTHAN);
114 
115 	return (0);
116 }
117 
118 static int
dt_aggregate_stddevcmp(int64_t * lhs,int64_t * rhs)119 dt_aggregate_stddevcmp(int64_t *lhs, int64_t *rhs)
120 {
121 	uint64_t lsd = dt_stddev((uint64_t *)lhs, 1);
122 	uint64_t rsd = dt_stddev((uint64_t *)rhs, 1);
123 
124 	if (lsd < rsd)
125 		return (DT_LESSTHAN);
126 
127 	if (lsd > rsd)
128 		return (DT_GREATERTHAN);
129 
130 	return (0);
131 }
132 
/*ARGSUSED*/
/*
 * Aggregating action for lquantize(): sum the bucket counts, skipping the
 * leading encoded argument word.  There are levels + 2 buckets in all
 * (underflow, the linear levels, and overflow).
 */
static void
dt_aggregate_lquantize(int64_t *existing, int64_t *new, size_t size)
{
	uint16_t levels = DTRACE_LQUANTIZE_LEVELS(existing[0]);
	int i;

	for (i = 1; i <= levels + 2; i++)
		existing[i] += new[i];
}
144 
/*
 * Compute the weighted sum of an lquantize() aggregation for value-based
 * sorting: each bucket's count is weighted by a representative value --
 * base - 1 for the underflow bucket, the bucket's base value for each
 * linear level, and the first out-of-range value for the overflow bucket.
 */
static long double
dt_aggregate_lquantizedsum(int64_t *lquanta)
{
	int64_t arg = *lquanta++;	/* encoded base/step/levels word */
	int32_t base = DTRACE_LQUANTIZE_BASE(arg);
	uint16_t step = DTRACE_LQUANTIZE_STEP(arg);
	uint16_t levels = DTRACE_LQUANTIZE_LEVELS(arg), i;
	long double total = (long double)lquanta[0] * (long double)(base - 1);

	for (i = 0; i < levels; base += step, i++)
		total += (long double)lquanta[i + 1] * (long double)base;

	/* After the loop, base is the start of the overflow range. */
	return (total + (long double)lquanta[levels + 1] *
	    (long double)(base + 1));
}
160 
/*
 * Return the weight at value zero in an lquantize() aggregation, or 0 if
 * zero lies outside the quantization's range; used as a tie-breaker when
 * the weighted sums compare equal.
 */
static int64_t
dt_aggregate_lquantizedzero(int64_t *lquanta)
{
	int64_t arg = *lquanta++;
	int32_t base = DTRACE_LQUANTIZE_BASE(arg);
	uint16_t step = DTRACE_LQUANTIZE_STEP(arg);
	uint16_t levels = DTRACE_LQUANTIZE_LEVELS(arg);
	uint16_t i;

	/* The underflow bucket (representative value base - 1) holds zero. */
	if (base == 1)
		return (lquanta[0]);

	/* Scan the linear buckets for the one whose base value is zero. */
	for (i = 0; i < levels; i++, base += step) {
		if (base == 0)
			return (lquanta[i + 1]);
	}

	/* The overflow bucket (representative value base + 1) holds zero. */
	if (base == -1)
		return (lquanta[levels + 1]);

	return (0);
}
184 
185 static int
dt_aggregate_lquantizedcmp(int64_t * lhs,int64_t * rhs)186 dt_aggregate_lquantizedcmp(int64_t *lhs, int64_t *rhs)
187 {
188 	long double lsum = dt_aggregate_lquantizedsum(lhs);
189 	long double rsum = dt_aggregate_lquantizedsum(rhs);
190 	int64_t lzero, rzero;
191 
192 	if (lsum < rsum)
193 		return (DT_LESSTHAN);
194 
195 	if (lsum > rsum)
196 		return (DT_GREATERTHAN);
197 
198 	/*
199 	 * If they're both equal, then we will compare based on the weights at
200 	 * zero.  If the weights at zero are equal (or if zero is not within
201 	 * the range of the linear quantization), then this will be judged a
202 	 * tie and will be resolved based on the key comparison.
203 	 */
204 	lzero = dt_aggregate_lquantizedzero(lhs);
205 	rzero = dt_aggregate_lquantizedzero(rhs);
206 
207 	if (lzero < rzero)
208 		return (DT_LESSTHAN);
209 
210 	if (lzero > rzero)
211 		return (DT_GREATERTHAN);
212 
213 	return (0);
214 }
215 
/*
 * Aggregating action for llquantize(): fold the new bucket counts into the
 * existing data.  Slot 0 holds the encoded factor/low/high/nsteps argument
 * word and must not be summed, so the loop starts at slot 1.
 */
static void
dt_aggregate_llquantize(int64_t *existing, int64_t *new, size_t size)
{
	size_t i;

	/*
	 * Use size_t for the index: the bound is size / sizeof (int64_t),
	 * which is unsigned; a signed int would provoke a sign-compare
	 * and could truncate for very large buffers.
	 */
	for (i = 1; i < size / sizeof (int64_t); i++)
		existing[i] += new[i];
}
224 
/*
 * Compute the weighted sum of an llquantize() aggregation for value-based
 * sorting.  The first bucket (underflow) is weighted by one less than the
 * smallest quantized value; each subsequent bucket is weighted by the
 * value at the start of its range; the final bucket (overflow) is weighted
 * by the first value beyond the quantized range.
 */
static long double
dt_aggregate_llquantizedsum(int64_t *llquanta)
{
	int64_t arg = *llquanta++;	/* encoded parameter word */
	uint16_t factor = DTRACE_LLQUANTIZE_FACTOR(arg);
	uint16_t low = DTRACE_LLQUANTIZE_LOW(arg);
	uint16_t high = DTRACE_LLQUANTIZE_HIGH(arg);
	uint16_t nsteps = DTRACE_LLQUANTIZE_NSTEP(arg);
	int bin = 0, order;
	int64_t value = 1, next, step;
	long double total;

	assert(nsteps >= factor);
	assert(nsteps % factor == 0);

	/* Advance value to factor^low, the start of the quantized range. */
	for (order = 0; order < low; order++)
		value *= factor;

	total = (long double)llquanta[bin++] * (long double)(value - 1);

	/*
	 * Within each order of magnitude there are at most nsteps linear
	 * steps; when next / nsteps would be less than one, every integer
	 * value gets its own bucket (step of 1).
	 */
	next = value * factor;
	step = next > nsteps ? next / nsteps : 1;

	while (order <= high) {
		assert(value < next);
		total += (long double)llquanta[bin++] * (long double)(value);

		if ((value += step) != next)
			continue;

		/* Crossed into the next order of magnitude: rescale. */
		next = value * factor;
		step = next > nsteps ? next / nsteps : 1;
		order++;
	}

	/* The remaining bucket is the overflow bucket. */
	return (total + (long double)llquanta[bin] * (long double)value);
}
262 
263 static int
dt_aggregate_llquantizedcmp(int64_t * lhs,int64_t * rhs)264 dt_aggregate_llquantizedcmp(int64_t *lhs, int64_t *rhs)
265 {
266 	long double lsum = dt_aggregate_llquantizedsum(lhs);
267 	long double rsum = dt_aggregate_llquantizedsum(rhs);
268 	int64_t lzero, rzero;
269 
270 	if (lsum < rsum)
271 		return (DT_LESSTHAN);
272 
273 	if (lsum > rsum)
274 		return (DT_GREATERTHAN);
275 
276 	/*
277 	 * If they're both equal, then we will compare based on the weights at
278 	 * zero.  If the weights at zero are equal, then this will be judged a
279 	 * tie and will be resolved based on the key comparison.
280 	 */
281 	lzero = lhs[1];
282 	rzero = rhs[1];
283 
284 	if (lzero < rzero)
285 		return (DT_LESSTHAN);
286 
287 	if (lzero > rzero)
288 		return (DT_GREATERTHAN);
289 
290 	return (0);
291 }
292 
293 static int
dt_aggregate_quantizedcmp(int64_t * lhs,int64_t * rhs)294 dt_aggregate_quantizedcmp(int64_t *lhs, int64_t *rhs)
295 {
296 	int nbuckets = DTRACE_QUANTIZE_NBUCKETS;
297 	long double ltotal = 0, rtotal = 0;
298 	int64_t lzero, rzero;
299 	uint_t i;
300 
301 	for (i = 0; i < nbuckets; i++) {
302 		int64_t bucketval = DTRACE_QUANTIZE_BUCKETVAL(i);
303 
304 		if (bucketval == 0) {
305 			lzero = lhs[i];
306 			rzero = rhs[i];
307 		}
308 
309 		ltotal += (long double)bucketval * (long double)lhs[i];
310 		rtotal += (long double)bucketval * (long double)rhs[i];
311 	}
312 
313 	if (ltotal < rtotal)
314 		return (DT_LESSTHAN);
315 
316 	if (ltotal > rtotal)
317 		return (DT_GREATERTHAN);
318 
319 	/*
320 	 * If they're both equal, then we will compare based on the weights at
321 	 * zero.  If the weights at zero are equal, then this will be judged a
322 	 * tie and will be resolved based on the key comparison.
323 	 */
324 	if (lzero < rzero)
325 		return (DT_LESSTHAN);
326 
327 	if (lzero > rzero)
328 		return (DT_GREATERTHAN);
329 
330 	return (0);
331 }
332 
/*
 * Normalize a user symbol key record in place: data[0] is the pid and
 * data[1] the pc.  The pc is rewritten to the start address of the
 * containing symbol so that equal symbols hash and compare equal.  On any
 * failure (vectored open, ungrabbable process, unresolvable address) the
 * data is left untouched.
 */
static void
dt_aggregate_usym(dtrace_hdl_t *dtp, uint64_t *data)
{
	uint64_t pid = data[0];
	uint64_t *pc = &data[1];
	struct ps_prochandle *P;
	GElf_Sym sym;

	/* Process lookup isn't possible through a vectored open. */
	if (dtp->dt_vector != NULL)
		return;

	if ((P = dt_proc_grab(dtp, pid, PGRAB_RDONLY | PGRAB_FORCE, 0)) == NULL)
		return;

	dt_proc_lock(dtp, P);

	if (Plookup_by_addr(P, *pc, NULL, 0, &sym) == 0)
		*pc = sym.st_value;

	dt_proc_unlock(dtp, P);
	dt_proc_release(dtp, P);
}
355 
/*
 * Normalize a user module key record in place: data[0] is the pid and
 * data[1] the pc.  The pc is rewritten to the base address of the mapping
 * that contains it so that addresses within the same module compare equal.
 * On any failure the data is left untouched.
 */
static void
dt_aggregate_umod(dtrace_hdl_t *dtp, uint64_t *data)
{
	uint64_t pid = data[0];
	uint64_t *pc = &data[1];
	struct ps_prochandle *P;
	const prmap_t *map;

	/* Process lookup isn't possible through a vectored open. */
	if (dtp->dt_vector != NULL)
		return;

	if ((P = dt_proc_grab(dtp, pid, PGRAB_RDONLY | PGRAB_FORCE, 0)) == NULL)
		return;

	dt_proc_lock(dtp, P);

	if ((map = Paddr_to_map(P, *pc)) != NULL)
		*pc = map->pr_vaddr;

	dt_proc_unlock(dtp, P);
	dt_proc_release(dtp, P);
}
378 
/*
 * Normalize a kernel symbol key record in place: the pc is rewritten to
 * the start address of the containing kernel symbol, if one is found.
 */
static void
dt_aggregate_sym(dtrace_hdl_t *dtp, uint64_t *data)
{
	GElf_Sym sym;
	uint64_t *pc = data;

	if (dtrace_lookup_by_addr(dtp, *pc, &sym, NULL) == 0)
		*pc = sym.st_value;
}
388 
/*
 * Normalize a kernel module key record in place: the pc is rewritten to
 * the text base address of the module whose text range contains it, so
 * that addresses within the same module compare equal.
 */
static void
dt_aggregate_mod(dtrace_hdl_t *dtp, uint64_t *data)
{
	uint64_t *pc = data;
	dt_module_t *dmp;

	if (dtp->dt_vector != NULL) {
		/*
		 * We don't have a way of just getting the module for a
		 * vectored open, and it doesn't seem to be worth defining
		 * one.  This means that use of mod() won't get true
		 * aggregation in the postmortem case (some modules may
		 * appear more than once in aggregation output).  It seems
		 * unlikely that anyone will ever notice or care...
		 */
		return;
	}

	/* Walk the module list for a text range containing *pc. */
	for (dmp = dt_list_next(&dtp->dt_modlist); dmp != NULL;
	    dmp = dt_list_next(dmp)) {
		if (*pc - dmp->dm_text_va < dmp->dm_text_size) {
			*pc = dmp->dm_text_va;
			return;
		}
	}
}
415 
/*
 * Return the aggregation variable ID for the given hash entry, caching it
 * in the aggregation description on first use.
 */
static dtrace_aggvarid_t
dt_aggregate_aggvarid(dt_ahashent_t *ent)
{
	dtrace_aggdesc_t *agg = ent->dtahe_data.dtada_desc;
	caddr_t data = ent->dtahe_data.dtada_data;
	dtrace_recdesc_t *rec = agg->dtagd_rec;

	/*
	 * First, we'll check the variable ID in the aggdesc.  If it's valid,
	 * we'll return it.  If not, we'll use the compiler-generated ID
	 * present as the first record.
	 */
	if (agg->dtagd_varid != DTRACE_AGGVARIDNONE)
		return (agg->dtagd_varid);

	agg->dtagd_varid = *((dtrace_aggvarid_t *)(uintptr_t)(data +
	    rec->dtrd_offset));

	return (agg->dtagd_varid);
}
436 
437 
/*
 * Snapshot the in-kernel aggregation buffer for the given CPU and fold
 * each record into the consumer-side aggregation hash (agp->dtat_hash).
 * The hash itself, and any entry not already present, are allocated on
 * demand.  Returns 0 on success (including the benign case of an
 * unconfigured CPU) or an error via dt_set_errno().
 */
static int
dt_aggregate_snap_cpu(dtrace_hdl_t *dtp, processorid_t cpu)
{
	dtrace_epid_t id;
	uint64_t hashval;
	size_t offs, roffs, size, ndx;
	int i, j, rval;
	caddr_t addr, data;
	dtrace_recdesc_t *rec;
	dt_aggregate_t *agp = &dtp->dt_aggregate;
	dtrace_aggdesc_t *agg;
	dt_ahash_t *hash = &agp->dtat_hash;
	dt_ahashent_t *h;
	dtrace_bufdesc_t b = agp->dtat_buf, *buf = &b;
	dtrace_aggdata_t *aggdata;
	int flags = agp->dtat_flags;

	buf->dtbd_cpu = cpu;

#ifdef illumos
	if (dt_ioctl(dtp, DTRACEIOC_AGGSNAP, buf) == -1) {
#else
	if (dt_ioctl(dtp, DTRACEIOC_AGGSNAP, &buf) == -1) {
#endif
		if (errno == ENOENT) {
			/*
			 * If that failed with ENOENT, it may be because the
			 * CPU was unconfigured.  This is okay; we'll just
			 * do nothing but return success.
			 */
			return (0);
		}

		return (dt_set_errno(dtp, errno));
	}

	/* Report any aggregation drops recorded by the kernel. */
	if (buf->dtbd_drops != 0) {
		int error;

		if (dtp->dt_oformat) {
			xo_open_instance("probes");
			dt_oformat_drop(dtp, cpu);
		}
		error = dt_handle_cpudrop(dtp, cpu, DTRACEDROP_AGGREGATION,
		    buf->dtbd_drops);
		if (dtp->dt_oformat)
			xo_close_instance("probes");
		if (error != 0)
			return (-1);
	}

	if (buf->dtbd_size == 0)
		return (0);

	/* Lazily allocate the (zeroed) aggregation hash table. */
	if (hash->dtah_hash == NULL) {
		size_t size;

		hash->dtah_size = DTRACE_AHASHSIZE;
		size = hash->dtah_size * sizeof (dt_ahashent_t *);

		if ((hash->dtah_hash = malloc(size)) == NULL)
			return (dt_set_errno(dtp, EDT_NOMEM));

		bzero(hash->dtah_hash, size);
	}

	for (offs = 0; offs < buf->dtbd_size; ) {
		/*
		 * We're guaranteed to have an ID.
		 */
		id = *((dtrace_epid_t *)((uintptr_t)buf->dtbd_data +
		    (uintptr_t)offs));

		if (id == DTRACE_AGGIDNONE) {
			/*
			 * This is filler to assure proper alignment of the
			 * next record; we simply ignore it.
			 */
			offs += sizeof (id);
			continue;
		}

		if ((rval = dt_aggid_lookup(dtp, id, &agg)) != 0)
			return (rval);

		addr = buf->dtbd_data + offs;
		size = agg->dtagd_size;
		hashval = 0;

		/*
		 * Normalize any symbol/module addresses in the key records
		 * (so equal symbols compare equal), then hash the key bytes.
		 * The final record is the aggregated value and is excluded.
		 */
		for (j = 0; j < agg->dtagd_nrecs - 1; j++) {
			rec = &agg->dtagd_rec[j];
			roffs = rec->dtrd_offset;

			switch (rec->dtrd_action) {
			case DTRACEACT_USYM:
				dt_aggregate_usym(dtp,
				    /* LINTED - alignment */
				    (uint64_t *)&addr[roffs]);
				break;

			case DTRACEACT_UMOD:
				dt_aggregate_umod(dtp,
				    /* LINTED - alignment */
				    (uint64_t *)&addr[roffs]);
				break;

			case DTRACEACT_SYM:
				/* LINTED - alignment */
				dt_aggregate_sym(dtp, (uint64_t *)&addr[roffs]);
				break;

			case DTRACEACT_MOD:
				/* LINTED - alignment */
				dt_aggregate_mod(dtp, (uint64_t *)&addr[roffs]);
				break;

			default:
				break;
			}

			for (i = 0; i < rec->dtrd_size; i++)
				hashval += addr[roffs + i];
		}

		ndx = hashval % hash->dtah_size;

		/*
		 * Search the hash chain for an existing entry with the same
		 * hash value, size and key bytes.
		 */
		for (h = hash->dtah_hash[ndx]; h != NULL; h = h->dtahe_next) {
			if (h->dtahe_hashval != hashval)
				continue;

			if (h->dtahe_size != size)
				continue;

			aggdata = &h->dtahe_data;
			data = aggdata->dtada_data;

			for (j = 0; j < agg->dtagd_nrecs - 1; j++) {
				rec = &agg->dtagd_rec[j];
				roffs = rec->dtrd_offset;

				for (i = 0; i < rec->dtrd_size; i++)
					if (addr[roffs + i] != data[roffs + i])
						goto hashnext;
			}

			/*
			 * We found it.  Now we need to apply the aggregating
			 * action on the data here.
			 */
			rec = &agg->dtagd_rec[agg->dtagd_nrecs - 1];
			roffs = rec->dtrd_offset;
			/* LINTED - alignment */
			h->dtahe_aggregate((int64_t *)&data[roffs],
			    /* LINTED - alignment */
			    (int64_t *)&addr[roffs], rec->dtrd_size);

			/*
			 * If we're keeping per CPU data, apply the aggregating
			 * action there as well.
			 */
			if (aggdata->dtada_percpu != NULL) {
				data = aggdata->dtada_percpu[cpu];

				/* LINTED - alignment */
				h->dtahe_aggregate((int64_t *)data,
				    /* LINTED - alignment */
				    (int64_t *)&addr[roffs], rec->dtrd_size);
			}

			goto bufnext;
hashnext:
			continue;
		}

		/*
		 * If we're here, we couldn't find an entry for this record.
		 */
		if ((h = malloc(sizeof (dt_ahashent_t))) == NULL)
			return (dt_set_errno(dtp, EDT_NOMEM));
		bzero(h, sizeof (dt_ahashent_t));
		aggdata = &h->dtahe_data;

		if ((aggdata->dtada_data = malloc(size)) == NULL) {
			free(h);
			return (dt_set_errno(dtp, EDT_NOMEM));
		}

		bcopy(addr, aggdata->dtada_data, size);
		aggdata->dtada_size = size;
		aggdata->dtada_desc = agg;
		aggdata->dtada_handle = dtp;
		(void) dt_epid_lookup(dtp, agg->dtagd_epid,
		    &aggdata->dtada_edesc, &aggdata->dtada_pdesc);
		aggdata->dtada_normal = 1;

		h->dtahe_hashval = hashval;
		h->dtahe_size = size;
		(void) dt_aggregate_aggvarid(h);

		rec = &agg->dtagd_rec[agg->dtagd_nrecs - 1];

		/*
		 * If per-CPU data was requested, allocate a value buffer per
		 * CPU: this CPU's buffer gets the snapshot data, the others
		 * start zeroed.
		 */
		if (flags & DTRACE_A_PERCPU) {
			int max_cpus = agp->dtat_maxcpu;
			caddr_t *percpu = malloc(max_cpus * sizeof (caddr_t));

			if (percpu == NULL) {
				free(aggdata->dtada_data);
				free(h);
				return (dt_set_errno(dtp, EDT_NOMEM));
			}

			for (j = 0; j < max_cpus; j++) {
				percpu[j] = malloc(rec->dtrd_size);

				if (percpu[j] == NULL) {
					while (--j >= 0)
						free(percpu[j]);

					free(aggdata->dtada_data);
					free(h);
					return (dt_set_errno(dtp, EDT_NOMEM));
				}

				if (j == cpu) {
					bcopy(&addr[rec->dtrd_offset],
					    percpu[j], rec->dtrd_size);
				} else {
					bzero(percpu[j], rec->dtrd_size);
				}
			}

			aggdata->dtada_percpu = percpu;
		}

		/* Select the aggregating function for subsequent snapshots. */
		switch (rec->dtrd_action) {
		case DTRACEAGG_MIN:
			h->dtahe_aggregate = dt_aggregate_min;
			break;

		case DTRACEAGG_MAX:
			h->dtahe_aggregate = dt_aggregate_max;
			break;

		case DTRACEAGG_LQUANTIZE:
			h->dtahe_aggregate = dt_aggregate_lquantize;
			break;

		case DTRACEAGG_LLQUANTIZE:
			h->dtahe_aggregate = dt_aggregate_llquantize;
			break;

		case DTRACEAGG_COUNT:
		case DTRACEAGG_SUM:
		case DTRACEAGG_AVG:
		case DTRACEAGG_STDDEV:
		case DTRACEAGG_QUANTIZE:
			h->dtahe_aggregate = dt_aggregate_count;
			break;

		default:
			return (dt_set_errno(dtp, EDT_BADAGG));
		}

		/* Link the new entry into its hash chain... */
		if (hash->dtah_hash[ndx] != NULL)
			hash->dtah_hash[ndx]->dtahe_prev = h;

		h->dtahe_next = hash->dtah_hash[ndx];
		hash->dtah_hash[ndx] = h;

		/* ... and into the list of all entries. */
		if (hash->dtah_all != NULL)
			hash->dtah_all->dtahe_prevall = h;

		h->dtahe_nextall = hash->dtah_all;
		hash->dtah_all = h;
bufnext:
		offs += agg->dtagd_size;
	}

	return (0);
}
718 
719 int
720 dtrace_aggregate_snap(dtrace_hdl_t *dtp)
721 {
722 	int i, rval;
723 	dt_aggregate_t *agp = &dtp->dt_aggregate;
724 	hrtime_t now = gethrtime();
725 	dtrace_optval_t interval = dtp->dt_options[DTRACEOPT_AGGRATE];
726 
727 	if (dtp->dt_lastagg != 0) {
728 		if (now - dtp->dt_lastagg < interval)
729 			return (0);
730 
731 		dtp->dt_lastagg += interval;
732 	} else {
733 		dtp->dt_lastagg = now;
734 	}
735 
736 	if (!dtp->dt_active)
737 		return (dt_set_errno(dtp, EINVAL));
738 
739 	if (agp->dtat_buf.dtbd_size == 0)
740 		return (0);
741 
742 	for (i = 0; i < agp->dtat_ncpus; i++) {
743 		if ((rval = dt_aggregate_snap_cpu(dtp, agp->dtat_cpus[i])))
744 			return (rval);
745 	}
746 
747 	return (0);
748 }
749 
750 static int
751 dt_aggregate_hashcmp(const void *lhs, const void *rhs)
752 {
753 	dt_ahashent_t *lh = *((dt_ahashent_t **)lhs);
754 	dt_ahashent_t *rh = *((dt_ahashent_t **)rhs);
755 	dtrace_aggdesc_t *lagg = lh->dtahe_data.dtada_desc;
756 	dtrace_aggdesc_t *ragg = rh->dtahe_data.dtada_desc;
757 
758 	if (lagg->dtagd_nrecs < ragg->dtagd_nrecs)
759 		return (DT_LESSTHAN);
760 
761 	if (lagg->dtagd_nrecs > ragg->dtagd_nrecs)
762 		return (DT_GREATERTHAN);
763 
764 	return (0);
765 }
766 
767 static int
768 dt_aggregate_varcmp(const void *lhs, const void *rhs)
769 {
770 	dt_ahashent_t *lh = *((dt_ahashent_t **)lhs);
771 	dt_ahashent_t *rh = *((dt_ahashent_t **)rhs);
772 	dtrace_aggvarid_t lid, rid;
773 
774 	lid = dt_aggregate_aggvarid(lh);
775 	rid = dt_aggregate_aggvarid(rh);
776 
777 	if (lid < rid)
778 		return (DT_LESSTHAN);
779 
780 	if (lid > rid)
781 		return (DT_GREATERTHAN);
782 
783 	return (0);
784 }
785 
/*
 * Order two hash entries by their key records.  When dt_keypos is set, the
 * comparison begins at that key position and wraps around the remaining
 * key records, so the requested key acts as the primary sort key.  Key
 * records are compared first by size, then by value: fixed-width records
 * numerically, pid/address pairs (umod/uaddr/usym) word by word, and
 * anything else byte by byte.
 */
static int
dt_aggregate_keycmp(const void *lhs, const void *rhs)
{
	dt_ahashent_t *lh = *((dt_ahashent_t **)lhs);
	dt_ahashent_t *rh = *((dt_ahashent_t **)rhs);
	dtrace_aggdesc_t *lagg = lh->dtahe_data.dtada_desc;
	dtrace_aggdesc_t *ragg = rh->dtahe_data.dtada_desc;
	dtrace_recdesc_t *lrec, *rrec;
	char *ldata, *rdata;
	int rval, i, j, keypos, nrecs;

	/* Differently-shaped aggregations sort by record count. */
	if ((rval = dt_aggregate_hashcmp(lhs, rhs)) != 0)
		return (rval);

	nrecs = lagg->dtagd_nrecs - 1;
	assert(nrecs == ragg->dtagd_nrecs - 1);

	/* An out-of-range key position falls back to the natural order. */
	keypos = dt_keypos + 1 >= nrecs ? 0 : dt_keypos;

	for (i = 1; i < nrecs; i++) {
		uint64_t lval, rval;
		int ndx = i + keypos;

		/* Wrap around past the last key record. */
		if (ndx >= nrecs)
			ndx = ndx - nrecs + 1;

		lrec = &lagg->dtagd_rec[ndx];
		rrec = &ragg->dtagd_rec[ndx];

		ldata = lh->dtahe_data.dtada_data + lrec->dtrd_offset;
		rdata = rh->dtahe_data.dtada_data + rrec->dtrd_offset;

		if (lrec->dtrd_size < rrec->dtrd_size)
			return (DT_LESSTHAN);

		if (lrec->dtrd_size > rrec->dtrd_size)
			return (DT_GREATERTHAN);

		switch (lrec->dtrd_size) {
		case sizeof (uint64_t):
			/* LINTED - alignment */
			lval = *((uint64_t *)ldata);
			/* LINTED - alignment */
			rval = *((uint64_t *)rdata);
			break;

		case sizeof (uint32_t):
			/* LINTED - alignment */
			lval = *((uint32_t *)ldata);
			/* LINTED - alignment */
			rval = *((uint32_t *)rdata);
			break;

		case sizeof (uint16_t):
			/* LINTED - alignment */
			lval = *((uint16_t *)ldata);
			/* LINTED - alignment */
			rval = *((uint16_t *)rdata);
			break;

		case sizeof (uint8_t):
			lval = *((uint8_t *)ldata);
			rval = *((uint8_t *)rdata);
			break;

		default:
			switch (lrec->dtrd_action) {
			case DTRACEACT_UMOD:
			case DTRACEACT_UADDR:
			case DTRACEACT_USYM:
				/* Compare the (pid, address) pair in order. */
				for (j = 0; j < 2; j++) {
					/* LINTED - alignment */
					lval = ((uint64_t *)ldata)[j];
					/* LINTED - alignment */
					rval = ((uint64_t *)rdata)[j];

					if (lval < rval)
						return (DT_LESSTHAN);

					if (lval > rval)
						return (DT_GREATERTHAN);
				}

				break;

			default:
				/* Arbitrary data: bytewise comparison. */
				for (j = 0; j < lrec->dtrd_size; j++) {
					lval = ((uint8_t *)ldata)[j];
					rval = ((uint8_t *)rdata)[j];

					if (lval < rval)
						return (DT_LESSTHAN);

					if (lval > rval)
						return (DT_GREATERTHAN);
				}
			}

			continue;
		}

		if (lval < rval)
			return (DT_LESSTHAN);

		if (lval > rval)
			return (DT_GREATERTHAN);
	}

	return (0);
}
896 
897 static int
898 dt_aggregate_valcmp(const void *lhs, const void *rhs)
899 {
900 	dt_ahashent_t *lh = *((dt_ahashent_t **)lhs);
901 	dt_ahashent_t *rh = *((dt_ahashent_t **)rhs);
902 	dtrace_aggdesc_t *lagg = lh->dtahe_data.dtada_desc;
903 	dtrace_aggdesc_t *ragg = rh->dtahe_data.dtada_desc;
904 	caddr_t ldata = lh->dtahe_data.dtada_data;
905 	caddr_t rdata = rh->dtahe_data.dtada_data;
906 	dtrace_recdesc_t *lrec, *rrec;
907 	int64_t *laddr, *raddr;
908 	int rval;
909 
910 	assert(lagg->dtagd_nrecs == ragg->dtagd_nrecs);
911 
912 	lrec = &lagg->dtagd_rec[lagg->dtagd_nrecs - 1];
913 	rrec = &ragg->dtagd_rec[ragg->dtagd_nrecs - 1];
914 
915 	assert(lrec->dtrd_action == rrec->dtrd_action);
916 
917 	laddr = (int64_t *)(uintptr_t)(ldata + lrec->dtrd_offset);
918 	raddr = (int64_t *)(uintptr_t)(rdata + rrec->dtrd_offset);
919 
920 	switch (lrec->dtrd_action) {
921 	case DTRACEAGG_AVG:
922 		rval = dt_aggregate_averagecmp(laddr, raddr);
923 		break;
924 
925 	case DTRACEAGG_STDDEV:
926 		rval = dt_aggregate_stddevcmp(laddr, raddr);
927 		break;
928 
929 	case DTRACEAGG_QUANTIZE:
930 		rval = dt_aggregate_quantizedcmp(laddr, raddr);
931 		break;
932 
933 	case DTRACEAGG_LQUANTIZE:
934 		rval = dt_aggregate_lquantizedcmp(laddr, raddr);
935 		break;
936 
937 	case DTRACEAGG_LLQUANTIZE:
938 		rval = dt_aggregate_llquantizedcmp(laddr, raddr);
939 		break;
940 
941 	case DTRACEAGG_COUNT:
942 	case DTRACEAGG_SUM:
943 	case DTRACEAGG_MIN:
944 	case DTRACEAGG_MAX:
945 		rval = dt_aggregate_countcmp(laddr, raddr);
946 		break;
947 
948 	default:
949 		assert(0);
950 	}
951 
952 	return (rval);
953 }
954 
/*
 * Order by aggregated value, breaking ties with the key comparison.  (The
 * key layouts are already known to match when the values compare equal.)
 */
static int
dt_aggregate_valkeycmp(const void *lhs, const void *rhs)
{
	int rval = dt_aggregate_valcmp(lhs, rhs);

	return (rval != 0 ? rval : dt_aggregate_keycmp(lhs, rhs));
}
970 
/*
 * Order by key, breaking ties with the aggregation variable ID.
 */
static int
dt_aggregate_keyvarcmp(const void *lhs, const void *rhs)
{
	int rval = dt_aggregate_keycmp(lhs, rhs);

	return (rval != 0 ? rval : dt_aggregate_varcmp(lhs, rhs));
}
981 
/*
 * Order by aggregation variable ID, breaking ties with the key comparison.
 */
static int
dt_aggregate_varkeycmp(const void *lhs, const void *rhs)
{
	int rval = dt_aggregate_varcmp(lhs, rhs);

	return (rval != 0 ? rval : dt_aggregate_keycmp(lhs, rhs));
}
992 
/*
 * Order by value (with key tie-break), then by aggregation variable ID.
 */
static int
dt_aggregate_valvarcmp(const void *lhs, const void *rhs)
{
	int rval = dt_aggregate_valkeycmp(lhs, rhs);

	return (rval != 0 ? rval : dt_aggregate_varcmp(lhs, rhs));
}
1003 
/*
 * Order by aggregation variable ID, then by value (with key tie-break).
 */
static int
dt_aggregate_varvalcmp(const void *lhs, const void *rhs)
{
	int rval = dt_aggregate_varcmp(lhs, rhs);

	return (rval != 0 ? rval : dt_aggregate_valkeycmp(lhs, rhs));
}
1014 
/*
 * Reversed key/variable ordering, achieved by swapping the operands.
 */
static int
dt_aggregate_keyvarrevcmp(const void *lhs, const void *rhs)
{
	int rval = dt_aggregate_keyvarcmp(rhs, lhs);

	return (rval);
}
1020 
/*
 * Reversed variable/key ordering, achieved by swapping the operands.
 */
static int
dt_aggregate_varkeyrevcmp(const void *lhs, const void *rhs)
{
	int rval = dt_aggregate_varkeycmp(rhs, lhs);

	return (rval);
}
1026 
/*
 * Reversed value/variable ordering, achieved by swapping the operands.
 */
static int
dt_aggregate_valvarrevcmp(const void *lhs, const void *rhs)
{
	int rval = dt_aggregate_valvarcmp(rhs, lhs);

	return (rval);
}
1032 
/*
 * Reversed variable/value ordering, achieved by swapping the operands.
 */
static int
dt_aggregate_varvalrevcmp(const void *lhs, const void *rhs)
{
	int rval = dt_aggregate_varvalcmp(rhs, lhs);

	return (rval);
}
1038 
/*
 * Order two "bundles" -- NULL-terminated arrays of hash entries that share
 * a key, laid out as values followed by the representative key entry.
 * When sorting on keys, the representative key is compared first; values
 * are then compared in order, with the key as a final tie-breaker in the
 * value-sorted case.
 */
static int
dt_aggregate_bundlecmp(const void *lhs, const void *rhs)
{
	dt_ahashent_t **lh = *((dt_ahashent_t ***)lhs);
	dt_ahashent_t **rh = *((dt_ahashent_t ***)rhs);
	int i, rval;

	if (dt_keysort) {
		/*
		 * If we're sorting on keys, we need to scan until we find the
		 * last entry -- that's the representative key.  (The order of
		 * the bundle is values followed by key to accommodate the
		 * default behavior of sorting by value.)  If the keys are
		 * equal, we'll fall into the value comparison loop, below.
		 */
		for (i = 0; lh[i + 1] != NULL; i++)
			continue;

		assert(i != 0);
		assert(rh[i + 1] == NULL);

		if ((rval = dt_aggregate_keycmp(&lh[i], &rh[i])) != 0)
			return (rval);
	}

	for (i = 0; ; i++) {
		if (lh[i + 1] == NULL) {
			/*
			 * All of the values are equal; if we're sorting on
			 * keys, then we're only here because the keys were
			 * found to be equal and these records are therefore
			 * equal.  If we're not sorting on keys, we'll use the
			 * key comparison from the representative key as the
			 * tie-breaker.
			 */
			if (dt_keysort)
				return (0);

			assert(i != 0);
			assert(rh[i + 1] == NULL);
			return (dt_aggregate_keycmp(&lh[i], &rh[i]));
		} else {
			if ((rval = dt_aggregate_valcmp(&lh[i], &rh[i])) != 0)
				return (rval);
		}
	}
}
1086 
/*
 * Prepare the consumer-side aggregation machinery: size the snapshot
 * buffer from the "aggsize" option and build the list of CPUs to snapshot
 * (a single CPU if the "cpu" option restricts it, otherwise every CPU with
 * good status).  Returns 0 on success, -1 (with errno set) on failure.
 */
int
dt_aggregate_go(dtrace_hdl_t *dtp)
{
	dt_aggregate_t *agp = &dtp->dt_aggregate;
	dtrace_optval_t size, cpu;
	dtrace_bufdesc_t *buf = &agp->dtat_buf;
	int rval, i;

	/* This must only run once per handle. */
	assert(agp->dtat_maxcpu == 0);
	assert(agp->dtat_ncpu == 0);
	assert(agp->dtat_cpus == NULL);

	agp->dtat_maxcpu = dt_cpu_maxid(dtp) + 1;
	if (agp->dtat_maxcpu <= 0)
		return (-1);
	agp->dtat_ncpu = dt_sysconf(dtp, _SC_NPROCESSORS_CONF);
	agp->dtat_cpus = malloc(agp->dtat_ncpu * sizeof (processorid_t));

	if (agp->dtat_cpus == NULL)
		return (dt_set_errno(dtp, EDT_NOMEM));

	/*
	 * Use the aggregation buffer size as reloaded from the kernel.
	 */
	size = dtp->dt_options[DTRACEOPT_AGGSIZE];

	rval = dtrace_getopt(dtp, "aggsize", &size);
	assert(rval == 0);

	/* No aggregation buffer configured: nothing more to set up. */
	if (size == 0 || size == DTRACEOPT_UNSET)
		return (0);

	buf = &agp->dtat_buf;
	buf->dtbd_size = size;

	if ((buf->dtbd_data = malloc(buf->dtbd_size)) == NULL)
		return (dt_set_errno(dtp, EDT_NOMEM));

	/*
	 * Now query for the CPUs enabled.
	 */
	rval = dtrace_getopt(dtp, "cpu", &cpu);
	assert(rval == 0 && cpu != DTRACEOPT_UNSET);

	if (cpu != DTRACE_CPUALL) {
		assert(cpu < agp->dtat_ncpu);
		agp->dtat_cpus[agp->dtat_ncpus++] = (processorid_t)cpu;

		return (0);
	}

	/*
	 * NOTE(review): dtat_cpus holds dtat_ncpu entries but this loop can
	 * append up to dtat_maxcpu entries (one per CPU ID with good
	 * status); this assumes no more than dtat_ncpu such IDs exist --
	 * confirm for sparse CPU ID configurations.
	 */
	agp->dtat_ncpus = 0;
	for (i = 0; i < agp->dtat_maxcpu; i++) {
		if (dt_status(dtp, i) == -1)
			continue;

		agp->dtat_cpus[agp->dtat_ncpus++] = i;
	}

	return (0);
}
1148 
/*
 * Act on the value returned by an aggregation walk callback for the hash
 * entry h.  The callback's return value selects one of the DTRACE_AGGWALK_*
 * actions:  continue (NEXT), zero the entry's value (CLEAR), flag the entry
 * as denormalized/normalized, unlink and free the entry (REMOVE), or abort
 * the walk.  Returns 0 on success or -1 with the error set on dtp.
 */
static int
dt_aggwalk_rval(dtrace_hdl_t *dtp, dt_ahashent_t *h, int rval)
{
	dt_aggregate_t *agp = &dtp->dt_aggregate;
	dtrace_aggdata_t *data;
	dtrace_aggdesc_t *aggdesc;
	dtrace_recdesc_t *rec;
	int i;

	switch (rval) {
	case DTRACE_AGGWALK_NEXT:
		break;

	case DTRACE_AGGWALK_CLEAR: {
		uint32_t size, offs = 0;

		/*
		 * The aggregating value is the final record of the
		 * aggregation description; only that value is cleared --
		 * the key records are left intact.
		 */
		aggdesc = h->dtahe_data.dtada_desc;
		rec = &aggdesc->dtagd_rec[aggdesc->dtagd_nrecs - 1];
		size = rec->dtrd_size;
		data = &h->dtahe_data;

		if (rec->dtrd_action == DTRACEAGG_LQUANTIZE) {
			/*
			 * For lquantize(), the first 64 bits of the value
			 * hold the lquantize() parameters; skip them so that
			 * clearing preserves them.
			 */
			offs = sizeof (uint64_t);
			size -= sizeof (uint64_t);
		}

		bzero(&data->dtada_data[rec->dtrd_offset] + offs, size);

		if (data->dtada_percpu == NULL)
			break;

		/* Also clear any per-CPU copies of the value. */
		for (i = 0; i < dtp->dt_aggregate.dtat_maxcpu; i++)
			bzero(data->dtada_percpu[i] + offs, size);
		break;
	}

	case DTRACE_AGGWALK_ERROR:
		/*
		 * We assume that errno is already set in this case.
		 */
		return (dt_set_errno(dtp, errno));

	case DTRACE_AGGWALK_ABORT:
		return (dt_set_errno(dtp, EDT_DIRABORT));

	case DTRACE_AGGWALK_DENORMALIZE:
		/* A normal of 1 means the data is presented unscaled. */
		h->dtahe_data.dtada_normal = 1;
		return (0);

	case DTRACE_AGGWALK_NORMALIZE:
		if (h->dtahe_data.dtada_normal == 0) {
			/*
			 * A zero normal is invalid; reset it to 1 and fail
			 * rather than divide by zero downstream.
			 */
			h->dtahe_data.dtada_normal = 1;
			return (dt_set_errno(dtp, EDT_BADRVAL));
		}

		return (0);

	case DTRACE_AGGWALK_REMOVE: {
		dtrace_aggdata_t *aggdata = &h->dtahe_data;
		int max_cpus = agp->dtat_maxcpu;

		/*
		 * First, remove this hash entry from its hash chain.
		 */
		if (h->dtahe_prev != NULL) {
			h->dtahe_prev->dtahe_next = h->dtahe_next;
		} else {
			/* Head of its chain: repoint the bucket. */
			dt_ahash_t *hash = &agp->dtat_hash;
			size_t ndx = h->dtahe_hashval % hash->dtah_size;

			assert(hash->dtah_hash[ndx] == h);
			hash->dtah_hash[ndx] = h->dtahe_next;
		}

		if (h->dtahe_next != NULL)
			h->dtahe_next->dtahe_prev = h->dtahe_prev;

		/*
		 * Now remove it from the list of all hash entries.
		 */
		if (h->dtahe_prevall != NULL) {
			h->dtahe_prevall->dtahe_nextall = h->dtahe_nextall;
		} else {
			dt_ahash_t *hash = &agp->dtat_hash;

			assert(hash->dtah_all == h);
			hash->dtah_all = h->dtahe_nextall;
		}

		if (h->dtahe_nextall != NULL)
			h->dtahe_nextall->dtahe_prevall = h->dtahe_prevall;

		/*
		 * We're unlinked.  We can safely destroy the data.
		 */
		if (aggdata->dtada_percpu != NULL) {
			for (i = 0; i < max_cpus; i++)
				free(aggdata->dtada_percpu[i]);
			free(aggdata->dtada_percpu);
		}

		free(aggdata->dtada_data);
		free(h);

		return (0);
	}

	default:
		return (dt_set_errno(dtp, EDT_BADRVAL));
	}

	return (0);
}
1262 
1263 void
1264 dt_aggregate_qsort(dtrace_hdl_t *dtp, void *base, size_t nel, size_t width,
1265     int (*compar)(const void *, const void *))
1266 {
1267 	int rev = dt_revsort, key = dt_keysort, keypos = dt_keypos;
1268 	dtrace_optval_t keyposopt = dtp->dt_options[DTRACEOPT_AGGSORTKEYPOS];
1269 
1270 	dt_revsort = (dtp->dt_options[DTRACEOPT_AGGSORTREV] != DTRACEOPT_UNSET);
1271 	dt_keysort = (dtp->dt_options[DTRACEOPT_AGGSORTKEY] != DTRACEOPT_UNSET);
1272 
1273 	if (keyposopt != DTRACEOPT_UNSET && keyposopt <= INT_MAX) {
1274 		dt_keypos = (int)keyposopt;
1275 	} else {
1276 		dt_keypos = 0;
1277 	}
1278 
1279 	if (compar == NULL) {
1280 		if (!dt_keysort) {
1281 			compar = dt_aggregate_varvalcmp;
1282 		} else {
1283 			compar = dt_aggregate_varkeycmp;
1284 		}
1285 	}
1286 
1287 	qsort(base, nel, width, compar);
1288 
1289 	dt_revsort = rev;
1290 	dt_keysort = key;
1291 	dt_keypos = keypos;
1292 }
1293 
1294 int
1295 dtrace_aggregate_walk(dtrace_hdl_t *dtp, dtrace_aggregate_f *func, void *arg)
1296 {
1297 	dt_ahashent_t *h, *next;
1298 	dt_ahash_t *hash = &dtp->dt_aggregate.dtat_hash;
1299 
1300 	for (h = hash->dtah_all; h != NULL; h = next) {
1301 		/*
1302 		 * dt_aggwalk_rval() can potentially remove the current hash
1303 		 * entry; we need to load the next hash entry before calling
1304 		 * into it.
1305 		 */
1306 		next = h->dtahe_nextall;
1307 
1308 		if (dt_aggwalk_rval(dtp, h, func(&h->dtahe_data, arg)) == -1)
1309 			return (-1);
1310 	}
1311 
1312 	return (0);
1313 }
1314 
/*
 * Compute the per-aggregation-variable totals used by histogram output:
 * for each aggregation variable, sum (or, with "aggzoom", track the scaled
 * maximum of) the final aggregating record of every element, noting whether
 * any value was negative or positive.  If clear is B_TRUE, only the
 * clearing pass is performed.  Returns 0 on success, -1 on allocation
 * failure.
 */
static int
dt_aggregate_total(dtrace_hdl_t *dtp, boolean_t clear)
{
	dt_ahashent_t *h;
	dtrace_aggdata_t **total;
	dtrace_aggid_t max = DTRACE_AGGVARIDNONE, id;
	dt_aggregate_t *agp = &dtp->dt_aggregate;
	dt_ahash_t *hash = &agp->dtat_hash;
	uint32_t tflags;

	tflags = DTRACE_A_TOTAL | DTRACE_A_HASNEGATIVES | DTRACE_A_HASPOSITIVES;

	/*
	 * If we need to deliver per-aggregation totals, we're going to take
	 * three passes over the aggregate:  one to clear everything out and
	 * determine our maximum aggregation ID, one to actually total
	 * everything up, and a final pass to assign the totals to the
	 * individual elements.
	 */
	for (h = hash->dtah_all; h != NULL; h = h->dtahe_nextall) {
		dtrace_aggdata_t *aggdata = &h->dtahe_data;

		if ((id = dt_aggregate_aggvarid(h)) > max)
			max = id;

		aggdata->dtada_total = 0;
		aggdata->dtada_flags &= ~tflags;
	}

	if (clear || max == DTRACE_AGGVARIDNONE)
		return (0);

	/* One slot per aggregation variable ID, indexed by dtagd_varid. */
	total = dt_zalloc(dtp, (max + 1) * sizeof (dtrace_aggdata_t *));

	if (total == NULL)
		return (-1);

	for (h = hash->dtah_all; h != NULL; h = h->dtahe_nextall) {
		dtrace_aggdata_t *aggdata = &h->dtahe_data;
		dtrace_aggdesc_t *agg = aggdata->dtada_desc;
		dtrace_recdesc_t *rec;
		caddr_t data;
		int64_t val, *addr;

		/* The aggregating value is always the final record. */
		rec = &agg->dtagd_rec[agg->dtagd_nrecs - 1];
		data = aggdata->dtada_data;
		addr = (int64_t *)(uintptr_t)(data + rec->dtrd_offset);

		switch (rec->dtrd_action) {
		case DTRACEAGG_STDDEV:
			val = dt_stddev((uint64_t *)addr, 1);
			break;

		case DTRACEAGG_SUM:
		case DTRACEAGG_COUNT:
			val = *addr;
			break;

		case DTRACEAGG_AVG:
			/* addr[0] is the count, addr[1] the running sum. */
			val = addr[0] ? (addr[1] / addr[0]) : 0;
			break;

		default:
			/* Quantizing actions don't participate in totals. */
			continue;
		}

		if (total[agg->dtagd_varid] == NULL) {
			/*
			 * The first element seen for a given variable holds
			 * that variable's running total.
			 */
			total[agg->dtagd_varid] = aggdata;
			aggdata->dtada_flags |= DTRACE_A_TOTAL;
		} else {
			aggdata = total[agg->dtagd_varid];
		}

		if (val > 0)
			aggdata->dtada_flags |= DTRACE_A_HASPOSITIVES;

		if (val < 0) {
			aggdata->dtada_flags |= DTRACE_A_HASNEGATIVES;
			val = -val;
		}

		if (dtp->dt_options[DTRACEOPT_AGGZOOM] != DTRACEOPT_UNSET) {
			/*
			 * When zooming, scale the value by 1/AGGZOOM_MAX and
			 * track the maximum rather than accumulating a sum.
			 */
			val = (int64_t)((long double)val *
			    (1 / DTRACE_AGGZOOM_MAX));

			if (val > aggdata->dtada_total)
				aggdata->dtada_total = val;
		} else {
			aggdata->dtada_total += val;
		}
	}

	/*
	 * And now one final pass to set everyone's total.
	 */
	for (h = hash->dtah_all; h != NULL; h = h->dtahe_nextall) {
		dtrace_aggdata_t *aggdata = &h->dtahe_data, *t;
		dtrace_aggdesc_t *agg = aggdata->dtada_desc;

		if ((t = total[agg->dtagd_varid]) == NULL || aggdata == t)
			continue;

		aggdata->dtada_total = t->dtada_total;
		aggdata->dtada_flags |= (t->dtada_flags & tflags);
	}

	dt_free(dtp, total);

	return (0);
}
1425 
/*
 * Determine, for each aggregation variable, the lowest and highest non-zero
 * bucket across all elements of lquantize()/quantize() aggregations.  These
 * are used by packed output ("aggpack") to render a consistent range for
 * every element of the same variable.  If clear is B_TRUE, only the
 * clearing pass is performed.  Returns 0 on success, -1 on allocation
 * failure.
 */
static int
dt_aggregate_minmaxbin(dtrace_hdl_t *dtp, boolean_t clear)
{
	dt_ahashent_t *h;
	dtrace_aggdata_t **minmax;
	dtrace_aggid_t max = DTRACE_AGGVARIDNONE, id;
	dt_aggregate_t *agp = &dtp->dt_aggregate;
	dt_ahash_t *hash = &agp->dtat_hash;

	/* First pass: reset bins/flags and find the maximum variable ID. */
	for (h = hash->dtah_all; h != NULL; h = h->dtahe_nextall) {
		dtrace_aggdata_t *aggdata = &h->dtahe_data;

		if ((id = dt_aggregate_aggvarid(h)) > max)
			max = id;

		aggdata->dtada_minbin = 0;
		aggdata->dtada_maxbin = 0;
		aggdata->dtada_flags &= ~DTRACE_A_MINMAXBIN;
	}

	if (clear || max == DTRACE_AGGVARIDNONE)
		return (0);

	/* One slot per aggregation variable ID, indexed by dtagd_varid. */
	minmax = dt_zalloc(dtp, (max + 1) * sizeof (dtrace_aggdata_t *));

	if (minmax == NULL)
		return (-1);

	for (h = hash->dtah_all; h != NULL; h = h->dtahe_nextall) {
		dtrace_aggdata_t *aggdata = &h->dtahe_data;
		dtrace_aggdesc_t *agg = aggdata->dtada_desc;
		dtrace_recdesc_t *rec;
		caddr_t data;
		int64_t *addr;
		int minbin = -1, maxbin = -1, i;
		int start = 0, size;

		/* The aggregating value is always the final record. */
		rec = &agg->dtagd_rec[agg->dtagd_nrecs - 1];
		size = rec->dtrd_size / sizeof (int64_t);
		data = aggdata->dtada_data;
		addr = (int64_t *)(uintptr_t)(data + rec->dtrd_offset);

		switch (rec->dtrd_action) {
		case DTRACEAGG_LQUANTIZE:
			/*
			 * For lquantize(), we always display the entire range
			 * of the aggregation when aggpack is set.
			 */
			start = 1;
			minbin = start;
			maxbin = size - 1 - start;
			break;

		case DTRACEAGG_QUANTIZE:
			/* Find the first and last buckets with any count. */
			for (i = start; i < size; i++) {
				if (!addr[i])
					continue;

				if (minbin == -1)
					minbin = i - start;

				maxbin = i - start;
			}

			if (minbin == -1) {
				/*
				 * If we have no data (e.g., due to a clear()
				 * or negative increments), we'll use the
				 * zero bucket as both our min and max.
				 */
				minbin = maxbin = DTRACE_QUANTIZE_ZEROBUCKET;
			}

			break;

		default:
			/* Non-binned actions have no min/max bins. */
			continue;
		}

		if (minmax[agg->dtagd_varid] == NULL) {
			/* First element seen for this variable. */
			minmax[agg->dtagd_varid] = aggdata;
			aggdata->dtada_flags |= DTRACE_A_MINMAXBIN;
			aggdata->dtada_minbin = minbin;
			aggdata->dtada_maxbin = maxbin;
			continue;
		}

		if (minbin < minmax[agg->dtagd_varid]->dtada_minbin)
			minmax[agg->dtagd_varid]->dtada_minbin = minbin;

		if (maxbin > minmax[agg->dtagd_varid]->dtada_maxbin)
			minmax[agg->dtagd_varid]->dtada_maxbin = maxbin;
	}

	/*
	 * And now one final pass to set everyone's minbin and maxbin.
	 */
	for (h = hash->dtah_all; h != NULL; h = h->dtahe_nextall) {
		dtrace_aggdata_t *aggdata = &h->dtahe_data, *mm;
		dtrace_aggdesc_t *agg = aggdata->dtada_desc;

		if ((mm = minmax[agg->dtagd_varid]) == NULL || aggdata == mm)
			continue;

		aggdata->dtada_minbin = mm->dtada_minbin;
		aggdata->dtada_maxbin = mm->dtada_maxbin;
		aggdata->dtada_flags |= DTRACE_A_MINMAXBIN;
	}

	dt_free(dtp, minmax);

	return (0);
}
1539 
/*
 * Walk the aggregate in sorted order, invoking func on every element.  If
 * sfunc is non-NULL, it is used as the sort comparator verbatim; otherwise
 * dt_aggregate_qsort() selects a comparator from the sort options.  Totals
 * and min/max bins are computed beforehand (and cleared afterwards) when
 * the "agghist" or "aggpack" options are set.  Returns 0 on success, -1 on
 * failure.
 */
static int
dt_aggregate_walk_sorted(dtrace_hdl_t *dtp,
    dtrace_aggregate_f *func, void *arg,
    int (*sfunc)(const void *, const void *))
{
	dt_aggregate_t *agp = &dtp->dt_aggregate;
	dt_ahashent_t *h, **sorted;
	dt_ahash_t *hash = &agp->dtat_hash;
	size_t i, nentries = 0;
	int rval = -1;

	agp->dtat_flags &= ~(DTRACE_A_TOTAL | DTRACE_A_MINMAXBIN);

	if (dtp->dt_options[DTRACEOPT_AGGHIST] != DTRACEOPT_UNSET) {
		agp->dtat_flags |= DTRACE_A_TOTAL;

		if (dt_aggregate_total(dtp, B_FALSE) != 0)
			return (-1);
	}

	if (dtp->dt_options[DTRACEOPT_AGGPACK] != DTRACEOPT_UNSET) {
		agp->dtat_flags |= DTRACE_A_MINMAXBIN;

		if (dt_aggregate_minmaxbin(dtp, B_FALSE) != 0)
			return (-1);
	}

	/* Count the entries, then gather them into a sortable array. */
	for (h = hash->dtah_all; h != NULL; h = h->dtahe_nextall)
		nentries++;

	sorted = dt_alloc(dtp, nentries * sizeof (dt_ahashent_t *));

	if (sorted == NULL)
		goto out;

	for (h = hash->dtah_all, i = 0; h != NULL; h = h->dtahe_nextall)
		sorted[i++] = h;

	/*
	 * The comparators read the dt_revsort/dt_keysort/dt_keypos globals,
	 * which must be held stable across the sort.
	 */
	(void) pthread_mutex_lock(&dt_qsort_lock);

	if (sfunc == NULL) {
		dt_aggregate_qsort(dtp, sorted, nentries,
		    sizeof (dt_ahashent_t *), NULL);
	} else {
		/*
		 * If we've been explicitly passed a sorting function,
		 * we'll use that -- ignoring the values of the "aggsortrev",
		 * "aggsortkey" and "aggsortkeypos" options.
		 */
		qsort(sorted, nentries, sizeof (dt_ahashent_t *), sfunc);
	}

	(void) pthread_mutex_unlock(&dt_qsort_lock);

	for (i = 0; i < nentries; i++) {
		h = sorted[i];

		if (dt_aggwalk_rval(dtp, h, func(&h->dtahe_data, arg)) == -1)
			goto out;
	}

	rval = 0;
out:
	/* Undo any totaling/binning state we set up above. */
	if (agp->dtat_flags & DTRACE_A_TOTAL)
		(void) dt_aggregate_total(dtp, B_TRUE);

	if (agp->dtat_flags & DTRACE_A_MINMAXBIN)
		(void) dt_aggregate_minmaxbin(dtp, B_TRUE);

	dt_free(dtp, sorted);
	return (rval);
}
1612 
/*
 * Walk the aggregate in the default sort order, as governed by the
 * "aggsortrev", "aggsortkey" and "aggsortkeypos" options.
 */
int
dtrace_aggregate_walk_sorted(dtrace_hdl_t *dtp,
    dtrace_aggregate_f *func, void *arg)
{
	return (dt_aggregate_walk_sorted(dtp, func, arg, NULL));
}
1619 
/* Walk the aggregate sorted by key (then variable, then value). */
int
dtrace_aggregate_walk_keysorted(dtrace_hdl_t *dtp,
    dtrace_aggregate_f *func, void *arg)
{
	return (dt_aggregate_walk_sorted(dtp, func,
	    arg, dt_aggregate_varkeycmp));
}
1627 
/* Walk the aggregate sorted by value (then variable, then key). */
int
dtrace_aggregate_walk_valsorted(dtrace_hdl_t *dtp,
    dtrace_aggregate_f *func, void *arg)
{
	return (dt_aggregate_walk_sorted(dtp, func,
	    arg, dt_aggregate_varvalcmp));
}
1635 
/* Walk the aggregate sorted by key first, then by variable. */
int
dtrace_aggregate_walk_keyvarsorted(dtrace_hdl_t *dtp,
    dtrace_aggregate_f *func, void *arg)
{
	return (dt_aggregate_walk_sorted(dtp, func,
	    arg, dt_aggregate_keyvarcmp));
}
1643 
/* Walk the aggregate sorted by value first, then by variable. */
int
dtrace_aggregate_walk_valvarsorted(dtrace_hdl_t *dtp,
    dtrace_aggregate_f *func, void *arg)
{
	return (dt_aggregate_walk_sorted(dtp, func,
	    arg, dt_aggregate_valvarcmp));
}
1651 
/* Walk the aggregate in reverse key-sorted order. */
int
dtrace_aggregate_walk_keyrevsorted(dtrace_hdl_t *dtp,
    dtrace_aggregate_f *func, void *arg)
{
	return (dt_aggregate_walk_sorted(dtp, func,
	    arg, dt_aggregate_varkeyrevcmp));
}
1659 
/* Walk the aggregate in reverse value-sorted order. */
int
dtrace_aggregate_walk_valrevsorted(dtrace_hdl_t *dtp,
    dtrace_aggregate_f *func, void *arg)
{
	return (dt_aggregate_walk_sorted(dtp, func,
	    arg, dt_aggregate_varvalrevcmp));
}
1667 
/* Walk the aggregate in reverse key-then-variable order. */
int
dtrace_aggregate_walk_keyvarrevsorted(dtrace_hdl_t *dtp,
    dtrace_aggregate_f *func, void *arg)
{
	return (dt_aggregate_walk_sorted(dtp, func,
	    arg, dt_aggregate_keyvarrevcmp));
}
1675 
/* Walk the aggregate in reverse value-then-variable order. */
int
dtrace_aggregate_walk_valvarrevsorted(dtrace_hdl_t *dtp,
    dtrace_aggregate_f *func, void *arg)
{
	return (dt_aggregate_walk_sorted(dtp, func,
	    arg, dt_aggregate_valvarrevcmp));
}
1683 
/*
 * Walk several aggregations "joined" by key, as required by multi-variable
 * printa():  elements of the naggvars aggregation variables in aggvars are
 * grouped into per-key bundles, with zero-filled placeholder data standing
 * in for any variable that has no value for a given key.  The bundles are
 * sorted (honoring "aggsortpos") and func is invoked once per bundle with
 * an array of naggvars + 1 aggdata pointers:  element 0 is the
 * representative key, elements 1 .. naggvars correspond to the aggregation
 * variables in their specified order.  Returns 0 on success, -1 on failure.
 */
int
dtrace_aggregate_walk_joined(dtrace_hdl_t *dtp, dtrace_aggvarid_t *aggvars,
    int naggvars, dtrace_aggregate_walk_joined_f *func, void *arg)
{
	dt_aggregate_t *agp = &dtp->dt_aggregate;
	dt_ahashent_t *h, **sorted = NULL, ***bundle, **nbundle;
	const dtrace_aggdata_t **data;
	dt_ahashent_t *zaggdata = NULL;
	dt_ahash_t *hash = &agp->dtat_hash;
	size_t nentries = 0, nbundles = 0, start, zsize = 0, bundlesize;
	dtrace_aggvarid_t max = 0, aggvar;
	int rval = -1, *map, *remap = NULL;
	int i, j;
	dtrace_optval_t sortpos = dtp->dt_options[DTRACEOPT_AGGSORTPOS];

	/*
	 * If the sorting position is greater than the number of aggregation
	 * variable IDs, we silently set it to 0.
	 */
	if (sortpos == DTRACEOPT_UNSET || sortpos >= naggvars)
		sortpos = 0;

	/*
	 * First we need to translate the specified aggregation variable IDs
	 * into a linear map that will allow us to translate an aggregation
	 * variable ID into its position in the specified aggvars.
	 */
	for (i = 0; i < naggvars; i++) {
		if (aggvars[i] == DTRACE_AGGVARIDNONE || aggvars[i] < 0)
			return (dt_set_errno(dtp, EDT_BADAGGVAR));

		if (aggvars[i] > max)
			max = aggvars[i];
	}

	/* map[id] is 1-based: 0 means "id not requested". */
	if ((map = dt_zalloc(dtp, (max + 1) * sizeof (int))) == NULL)
		return (-1);

	zaggdata = dt_zalloc(dtp, naggvars * sizeof (dt_ahashent_t));

	if (zaggdata == NULL)
		goto out;

	for (i = 0; i < naggvars; i++) {
		/* Rotate by sortpos so the sort variable maps to slot 0. */
		int ndx = i + sortpos;

		if (ndx >= naggvars)
			ndx -= naggvars;

		aggvar = aggvars[ndx];
		assert(aggvar <= max);

		if (map[aggvar]) {
			/*
			 * We have an aggregation variable that is present
			 * more than once in the array of aggregation
			 * variables.  While it's unclear why one might want
			 * to do this, it's legal.  To support this construct,
			 * we will allocate a remap that will indicate the
			 * position from which this aggregation variable
			 * should be pulled.  (That is, where the remap will
			 * map from one position to another.)
			 */
			if (remap == NULL) {
				remap = dt_zalloc(dtp, naggvars * sizeof (int));

				if (remap == NULL)
					goto out;
			}

			/*
			 * Given that the variable is already present, assert
			 * that following through the mapping and adjusting
			 * for the sort position yields the same aggregation
			 * variable ID.
			 */
			assert(aggvars[(map[aggvar] - 1 + sortpos) %
			    naggvars] == aggvars[ndx]);

			remap[i] = map[aggvar];
			continue;
		}

		map[aggvar] = i + 1;
	}

	/*
	 * We need to take two passes over the data to size our allocation, so
	 * we'll use the first pass to also fill in the zero-filled data to be
	 * used to properly format a zero-valued aggregation.
	 */
	for (h = hash->dtah_all; h != NULL; h = h->dtahe_nextall) {
		dtrace_aggvarid_t id;
		int ndx;

		if ((id = dt_aggregate_aggvarid(h)) > max || !(ndx = map[id]))
			continue;

		if (zaggdata[ndx - 1].dtahe_size == 0) {
			zaggdata[ndx - 1].dtahe_size = h->dtahe_size;
			zaggdata[ndx - 1].dtahe_data = h->dtahe_data;
		}

		nentries++;
	}

	if (nentries == 0) {
		/*
		 * We couldn't find any entries; there is nothing else to do.
		 */
		rval = 0;
		goto out;
	}

	/*
	 * Before we sort the data, we're going to look for any holes in our
	 * zero-filled data.  This will occur if an aggregation variable that
	 * we are being asked to print has not yet been assigned the result of
	 * any aggregating action for _any_ tuple.  The issue becomes that we
	 * would like a zero value to be printed for all columns for this
	 * aggregation, but without any record description, we don't know the
	 * aggregating action that corresponds to the aggregation variable.  To
	 * try to find a match, we're simply going to lookup aggregation IDs
	 * (which are guaranteed to be contiguous and to start from 1), looking
	 * for the specified aggregation variable ID.  If we find a match,
	 * we'll use that.  If we iterate over all aggregation IDs and don't
	 * find a match, then we must be an anonymous enabling.  (Anonymous
	 * enablings can't currently derive either aggregation variable IDs or
	 * aggregation variable names given only an aggregation ID.)  In this
	 * obscure case (anonymous enabling, multiple aggregation printa() with
	 * some aggregations not represented for any tuple), our defined
	 * behavior is that the zero will be printed in the format of the first
	 * aggregation variable that contains any non-zero value.
	 */
	for (i = 0; i < naggvars; i++) {
		if (zaggdata[i].dtahe_size == 0) {
			dtrace_aggvarid_t aggvar;

			aggvar = aggvars[(i - sortpos + naggvars) % naggvars];
			assert(zaggdata[i].dtahe_data.dtada_data == NULL);

			for (j = DTRACE_AGGIDNONE + 1; ; j++) {
				dtrace_aggdesc_t *agg;
				dtrace_aggdata_t *aggdata;

				if (dt_aggid_lookup(dtp, j, &agg) != 0)
					break;

				if (agg->dtagd_varid != aggvar)
					continue;

				/*
				 * We have our description -- now we need to
				 * cons up the zaggdata entry for it.
				 */
				aggdata = &zaggdata[i].dtahe_data;
				aggdata->dtada_size = agg->dtagd_size;
				aggdata->dtada_desc = agg;
				aggdata->dtada_handle = dtp;
				(void) dt_epid_lookup(dtp, agg->dtagd_epid,
				    &aggdata->dtada_edesc,
				    &aggdata->dtada_pdesc);
				aggdata->dtada_normal = 1;
				zaggdata[i].dtahe_hashval = 0;
				zaggdata[i].dtahe_size = agg->dtagd_size;
				break;
			}

			if (zaggdata[i].dtahe_size == 0) {
				caddr_t data;

				/*
				 * We couldn't find this aggregation, meaning
				 * that we have never seen it before for any
				 * tuple _and_ this is an anonymous enabling.
				 * That is, we're in the obscure case outlined
				 * above.  In this case, our defined behavior
				 * is to format the data in the format of the
				 * first non-zero aggregation -- of which, of
				 * course, we know there to be at least one
				 * (or nentries would have been zero).
				 */
				for (j = 0; j < naggvars; j++) {
					if (zaggdata[j].dtahe_size != 0)
						break;
				}

				assert(j < naggvars);
				zaggdata[i] = zaggdata[j];

				data = zaggdata[i].dtahe_data.dtada_data;
				assert(data != NULL);
			}
		}
	}

	/*
	 * Now we need to allocate our zero-filled data for use for
	 * aggregations that don't have a value corresponding to a given key.
	 */
	for (i = 0; i < naggvars; i++) {
		dtrace_aggdata_t *aggdata = &zaggdata[i].dtahe_data;
		dtrace_aggdesc_t *aggdesc = aggdata->dtada_desc;
		dtrace_recdesc_t *rec;
		uint64_t larg;
		caddr_t zdata;

		zsize = zaggdata[i].dtahe_size;
		assert(zsize != 0);

		if ((zdata = dt_zalloc(dtp, zsize)) == NULL) {
			/*
			 * If we failed to allocated some zero-filled data, we
			 * need to zero out the remaining dtada_data pointers
			 * to prevent the wrong data from being freed below.
			 */
			for (j = i; j < naggvars; j++)
				zaggdata[j].dtahe_data.dtada_data = NULL;
			goto out;
		}

		aggvar = aggvars[(i - sortpos + naggvars) % naggvars];

		/*
		 * First, the easy bit.  To maintain compatibility with
		 * consumers that pull the compiler-generated ID out of the
		 * data, we put that ID at the top of the zero-filled data.
		 */
		rec = &aggdesc->dtagd_rec[0];
		/* LINTED - alignment */
		*((dtrace_aggvarid_t *)(zdata + rec->dtrd_offset)) = aggvar;

		rec = &aggdesc->dtagd_rec[aggdesc->dtagd_nrecs - 1];

		/*
		 * Now for the more complicated part.  If (and only if) this
		 * is an lquantize() aggregating action, zero-filled data is
		 * not equivalent to an empty record:  we must also get the
		 * parameters for the lquantize().
		 */
		if (rec->dtrd_action == DTRACEAGG_LQUANTIZE) {
			if (aggdata->dtada_data != NULL) {
				/*
				 * The easier case here is if we actually have
				 * some prototype data -- in which case we
				 * manually dig it out of the aggregation
				 * record.
				 */
				/* LINTED - alignment */
				larg = *((uint64_t *)(aggdata->dtada_data +
				    rec->dtrd_offset));
			} else {
				/*
				 * We don't have any prototype data.  As a
				 * result, we know that we _do_ have the
				 * compiler-generated information.  (If this
				 * were an anonymous enabling, all of our
				 * zero-filled data would have prototype data
				 * -- either directly or indirectly.) So as
				 * gross as it is, we'll grovel around in the
				 * compiler-generated information to find the
				 * lquantize() parameters.
				 */
				dtrace_stmtdesc_t *sdp;
				dt_ident_t *aid;
				dt_idsig_t *isp;

				sdp = (dtrace_stmtdesc_t *)(uintptr_t)
				    aggdesc->dtagd_rec[0].dtrd_uarg;
				aid = sdp->dtsd_aggdata;
				isp = (dt_idsig_t *)aid->di_data;
				assert(isp->dis_auxinfo != 0);
				larg = isp->dis_auxinfo;
			}

			/* LINTED - alignment */
			*((uint64_t *)(zdata + rec->dtrd_offset)) = larg;
		}

		aggdata->dtada_data = zdata;
	}

	/*
	 * Now that we've dealt with setting up our zero-filled data, we can
	 * allocate our sorted array, and take another pass over the data to
	 * fill it.
	 */
	sorted = dt_alloc(dtp, nentries * sizeof (dt_ahashent_t *));

	if (sorted == NULL)
		goto out;

	for (h = hash->dtah_all, i = 0; h != NULL; h = h->dtahe_nextall) {
		dtrace_aggvarid_t id;

		if ((id = dt_aggregate_aggvarid(h)) > max || !map[id])
			continue;

		sorted[i++] = h;
	}

	assert(i == nentries);

	/*
	 * We've loaded our array; now we need to sort by value to allow us
	 * to create bundles of like value.  We're going to acquire the
	 * dt_qsort_lock here, and hold it across all of our subsequent
	 * comparison and sorting.
	 */
	(void) pthread_mutex_lock(&dt_qsort_lock);

	qsort(sorted, nentries, sizeof (dt_ahashent_t *),
	    dt_aggregate_keyvarcmp);

	/*
	 * Now we need to go through and create bundles.  Because the number
	 * of bundles is bounded by the size of the sorted array, we're going
	 * to reuse the underlying storage.  And note that "bundle" is an
	 * array of pointers to arrays of pointers to dt_ahashent_t -- making
	 * its type (regrettably) "dt_ahashent_t ***".  (Regrettable because
	 * '*' -- like '_' and 'X' -- should never appear in triplicate in
	 * an ideal world.)
	 */
	bundle = (dt_ahashent_t ***)sorted;

	for (i = 1, start = 0; i <= nentries; i++) {
		if (i < nentries &&
		    dt_aggregate_keycmp(&sorted[i], &sorted[i - 1]) == 0)
			continue;

		/*
		 * We have a bundle boundary.  Everything from start to
		 * (i - 1) belongs in one bundle.
		 */
		assert(i - start <= naggvars);
		bundlesize = (naggvars + 2) * sizeof (dt_ahashent_t *);

		if ((nbundle = dt_zalloc(dtp, bundlesize)) == NULL) {
			(void) pthread_mutex_unlock(&dt_qsort_lock);
			goto out;
		}

		for (j = start; j < i; j++) {
			dtrace_aggvarid_t id = dt_aggregate_aggvarid(sorted[j]);

			assert(id <= max);
			assert(map[id] != 0);
			assert(map[id] - 1 < naggvars);
			assert(nbundle[map[id] - 1] == NULL);
			nbundle[map[id] - 1] = sorted[j];

			/* Slot naggvars holds the representative key. */
			if (nbundle[naggvars] == NULL)
				nbundle[naggvars] = sorted[j];
		}

		for (j = 0; j < naggvars; j++) {
			if (nbundle[j] != NULL)
				continue;

			/*
			 * Before we assume that this aggregation variable
			 * isn't present (and fall back to using the
			 * zero-filled data allocated earlier), check the
			 * remap.  If we have a remapping, we'll drop it in
			 * here.  Note that we might be remapping an
			 * aggregation variable that isn't present for this
			 * key; in this case, the aggregation data that we
			 * copy will point to the zeroed data.
			 */
			if (remap != NULL && remap[j]) {
				assert(remap[j] - 1 < j);
				assert(nbundle[remap[j] - 1] != NULL);
				nbundle[j] = nbundle[remap[j] - 1];
			} else {
				nbundle[j] = &zaggdata[j];
			}
		}

		bundle[nbundles++] = nbundle;
		start = i;
	}

	/*
	 * Now we need to re-sort based on the first value.
	 */
	dt_aggregate_qsort(dtp, bundle, nbundles, sizeof (dt_ahashent_t **),
	    dt_aggregate_bundlecmp);

	(void) pthread_mutex_unlock(&dt_qsort_lock);

	/*
	 * We're done!  Now we just need to go back over the sorted bundles,
	 * calling the function.
	 */
	data = alloca((naggvars + 1) * sizeof (dtrace_aggdata_t *));

	for (i = 0; i < nbundles; i++) {
		for (j = 0; j < naggvars; j++)
			data[j + 1] = NULL;

		for (j = 0; j < naggvars; j++) {
			/* Undo the sortpos rotation applied to the map. */
			int ndx = j - sortpos;

			if (ndx < 0)
				ndx += naggvars;

			assert(bundle[i][ndx] != NULL);
			data[j + 1] = &bundle[i][ndx]->dtahe_data;
		}

		for (j = 0; j < naggvars; j++)
			assert(data[j + 1] != NULL);

		/*
		 * The representative key is the last element in the bundle.
		 * Assert that we have one, and then set it to be the first
		 * element of data.
		 */
		assert(bundle[i][j] != NULL);
		data[0] = &bundle[i][j]->dtahe_data;

		if ((rval = func(data, naggvars + 1, arg)) == -1)
			goto out;
	}

	rval = 0;
out:
	for (i = 0; i < nbundles; i++)
		dt_free(dtp, bundle[i]);

	if (zaggdata != NULL) {
		for (i = 0; i < naggvars; i++)
			dt_free(dtp, zaggdata[i].dtahe_data.dtada_data);
	}

	dt_free(dtp, zaggdata);
	dt_free(dtp, sorted);
	dt_free(dtp, remap);
	dt_free(dtp, map);

	return (rval);
}
2126 
2127 int
2128 dtrace_aggregate_print(dtrace_hdl_t *dtp, FILE *fp,
2129     dtrace_aggregate_walk_f *func)
2130 {
2131 	dt_print_aggdata_t pd;
2132 
2133 	bzero(&pd, sizeof (pd));
2134 
2135 	pd.dtpa_dtp = dtp;
2136 	pd.dtpa_fp = fp;
2137 	pd.dtpa_allunprint = 1;
2138 
2139 	if (func == NULL)
2140 		func = dtrace_aggregate_walk_sorted;
2141 
2142 	if (dtp->dt_oformat) {
2143 		if ((*func)(dtp, dt_format_agg, &pd) == -1)
2144 			return (dt_set_errno(dtp, dtp->dt_errno));
2145 	} else {
2146 		if ((*func)(dtp, dt_print_agg, &pd) == -1)
2147 			return (dt_set_errno(dtp, dtp->dt_errno));
2148 	}
2149 
2150 	return (0);
2151 }
2152 
2153 void
2154 dtrace_aggregate_clear(dtrace_hdl_t *dtp)
2155 {
2156 	dt_aggregate_t *agp = &dtp->dt_aggregate;
2157 	dt_ahash_t *hash = &agp->dtat_hash;
2158 	dt_ahashent_t *h;
2159 	dtrace_aggdata_t *data;
2160 	dtrace_aggdesc_t *aggdesc;
2161 	dtrace_recdesc_t *rec;
2162 	int i, max_cpus = agp->dtat_maxcpu;
2163 
2164 	for (h = hash->dtah_all; h != NULL; h = h->dtahe_nextall) {
2165 		aggdesc = h->dtahe_data.dtada_desc;
2166 		rec = &aggdesc->dtagd_rec[aggdesc->dtagd_nrecs - 1];
2167 		data = &h->dtahe_data;
2168 
2169 		bzero(&data->dtada_data[rec->dtrd_offset], rec->dtrd_size);
2170 
2171 		if (data->dtada_percpu == NULL)
2172 			continue;
2173 
2174 		for (i = 0; i < max_cpus; i++)
2175 			bzero(data->dtada_percpu[i], rec->dtrd_size);
2176 	}
2177 }
2178 
2179 void
2180 dt_aggregate_destroy(dtrace_hdl_t *dtp)
2181 {
2182 	dt_aggregate_t *agp = &dtp->dt_aggregate;
2183 	dt_ahash_t *hash = &agp->dtat_hash;
2184 	dt_ahashent_t *h, *next;
2185 	dtrace_aggdata_t *aggdata;
2186 	int i, max_cpus = agp->dtat_maxcpu;
2187 
2188 	if (hash->dtah_hash == NULL) {
2189 		assert(hash->dtah_all == NULL);
2190 	} else {
2191 		free(hash->dtah_hash);
2192 
2193 		for (h = hash->dtah_all; h != NULL; h = next) {
2194 			next = h->dtahe_nextall;
2195 
2196 			aggdata = &h->dtahe_data;
2197 
2198 			if (aggdata->dtada_percpu != NULL) {
2199 				for (i = 0; i < max_cpus; i++)
2200 					free(aggdata->dtada_percpu[i]);
2201 				free(aggdata->dtada_percpu);
2202 			}
2203 
2204 			free(aggdata->dtada_data);
2205 			free(h);
2206 		}
2207 
2208 		hash->dtah_hash = NULL;
2209 		hash->dtah_all = NULL;
2210 		hash->dtah_size = 0;
2211 	}
2212 
2213 	free(agp->dtat_buf.dtbd_data);
2214 	free(agp->dtat_cpus);
2215 }
2216