/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2004 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/types.h>
#include <sys/exacct.h>
#include <sys/exacct_catalog.h>
#include <sys/exacct_impl.h>

#ifndef _KERNEL
#include <limits.h>
#include <errno.h>
#include <poll.h>
#include <stdlib.h>
#include <strings.h>
#else
#include <sys/systm.h>
#endif

/*
 * extended accounting file core routines
 *
 * Routines shared by libexacct and the kernel for the definition,
 * construction and packing of extended accounting (exacct) records.
 *
 * Locking
 *	All routines in this file use ea_alloc(), which is a malloc()
 *	wrapper in userland and a kmem_alloc(..., KM_SLEEP) wrapper in the
 *	kernel.  Accordingly, all routines require a context suitable for
 *	KM_SLEEP allocations.
 */

#define	DEFAULT_ENTRIES	4

/*
 * ea_alloc() and ea_free() provide the common exacct code with a single
 * allocation interface backed by the kmem allocator in the kernel and by
 * libc's malloc() in userland.
 */
void *
ea_alloc(size_t size)
{
#ifndef _KERNEL
	void *p;

	while ((p = malloc(size)) == NULL && errno == EAGAIN)
		(void) poll(NULL, 0, 10 * MILLISEC);
	if (p == NULL) {
		EXACCT_SET_ERR(EXR_SYSCALL_FAIL);
	} else {
		EXACCT_SET_ERR(EXR_OK);
	}
	return (p);
#else
	return (kmem_alloc(size, KM_SLEEP));
#endif
}

#ifndef _KERNEL
/*ARGSUSED*/
#endif
void
ea_free(void *ptr, size_t size)
{
#ifndef _KERNEL
	free(ptr);
#else
	kmem_free(ptr, size);
#endif
}

/*
 * ea_strdup() returns a pointer that, if non-NULL, must be freed using
 * ea_strfree() once its useful life ends.
 */
char *
ea_strdup(const char *ptr)
{
	/* Sets exacct_errno. */
	char *p = ea_alloc(strlen(ptr) + 1);
	if (p != NULL) {
		bcopy(ptr, p, strlen(ptr) + 1);
	}
	return (p);
}

/*
 * ea_strfree() frees a string allocated with ea_strdup().
 */
void
ea_strfree(char *ptr)
{
#ifndef _KERNEL
	free(ptr);
#else
	kmem_free(ptr, strlen(ptr) + 1);
#endif
}
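
/*
 * Example (an illustrative sketch; the guard macro and function name are
 * hypothetical, not part of this interface): ea_free() takes an explicit
 * size because the kernel variant is backed by kmem_free(), which requires
 * the original allocation size.  Callers must therefore hand back the same
 * size they passed to ea_alloc().
 */
#ifdef EXACCT_ALLOC_EXAMPLE
static void
example_alloc_pair(void)
{
	size_t sz = 64;
	void *p = ea_alloc(sz);		/* EXR_SYSCALL_FAIL on failure */

	if (p != NULL)
		ea_free(p, sz);		/* size must match ea_alloc() */
}
#endif	/* EXACCT_ALLOC_EXAMPLE */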

/*
 * ea_cond_memcpy_at_offset() provides a simple conditional memcpy() that
 * allows us to write a pack routine that returns a valid buffer size,
 * copying only in the case that a non-NULL buffer is provided.
 */
static void
ea_cond_memcpy_at_offset(void *dst, size_t offset, size_t dstsize, void *src,
    size_t size)
{
	char *cdst = dst;
	char *csrc = src;

	if (dst == NULL || src == NULL || size == 0 || offset + size > dstsize)
		return;

	bcopy(csrc, cdst + offset, size);
}

/*
 * exacct_order{16,32,64}() are byte-swapping routines that place the native
 * data indicated by the input pointer in big-endian order.  Each exacct_order
 * function is its own inverse.
 */
#ifndef _LITTLE_ENDIAN
/*ARGSUSED*/
#endif /* _LITTLE_ENDIAN */
void
exacct_order16(uint16_t *in)
{
#ifdef _LITTLE_ENDIAN
	uint8_t s;
	union {
		uint16_t agg;
		uint8_t arr[2];
	} t;

	t.agg = *in;

	s = t.arr[0];
	t.arr[0] = t.arr[1];
	t.arr[1] = s;

	*in = t.agg;
#endif /* _LITTLE_ENDIAN */
}

#ifndef _LITTLE_ENDIAN
/*ARGSUSED*/
#endif /* _LITTLE_ENDIAN */
void
exacct_order32(uint32_t *in)
{
#ifdef _LITTLE_ENDIAN
	uint16_t s;
	union {
		uint32_t agg;
		uint16_t arr[2];
	} t;

	t.agg = *in;
	exacct_order16(&t.arr[0]);
	exacct_order16(&t.arr[1]);

	s = t.arr[0];
	t.arr[0] = t.arr[1];
	t.arr[1] = s;

	*in = t.agg;
#endif /* _LITTLE_ENDIAN */
}

#ifndef _LITTLE_ENDIAN
/*ARGSUSED*/
#endif /* _LITTLE_ENDIAN */
void
exacct_order64(uint64_t *in)
{
#ifdef _LITTLE_ENDIAN
	uint32_t s;
	union {
		uint64_t agg;
		uint32_t arr[2];
	} t;

	t.agg = *in;
	exacct_order32(&t.arr[0]);
	exacct_order32(&t.arr[1]);

	s = t.arr[0];
	t.arr[0] = t.arr[1];
	t.arr[1] = s;

	*in = t.agg;
#endif /* _LITTLE_ENDIAN */
}
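
/*
 * Example (an illustrative sketch; the guard macro and function name are
 * hypothetical): since each exacct_order*() routine is its own inverse, a
 * round trip restores the original value.  ea_pack_object() below relies on
 * this, swapping a field to big-endian for the copy-out and immediately
 * swapping it back so the in-memory object is left unmodified.
 */
#ifdef EXACCT_ORDER_EXAMPLE
static uint32_t
example_order_roundtrip(uint32_t v)
{
	uint32_t t = v;

	exacct_order32(&t);	/* native -> big-endian */
	exacct_order32(&t);	/* big-endian -> native */
	return (t);		/* always equal to v */
}
#endif	/* EXACCT_ORDER_EXAMPLE */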

int
ea_match_object_catalog(ea_object_t *obj, ea_catalog_t catmask)
{
	ea_catalog_t catval = obj->eo_catalog;

#define	EM_MATCH(v, m, M)	((m & M) == 0 || (v & M) == (m & M))
	return (EM_MATCH(catval, catmask, EXT_TYPE_MASK) &&
	    EM_MATCH(catval, catmask, EXC_CATALOG_MASK) &&
	    EM_MATCH(catval, catmask, EXD_DATA_MASK));
#undef EM_MATCH
}

int
ea_set_item(ea_object_t *obj, ea_catalog_t tag,
    const void *value, size_t valsize)
{
	ea_item_t *item = &obj->eo_item;

	if ((tag & EXT_TYPE_MASK) == EXT_GROUP) {
		EXACCT_SET_ERR(EXR_INVALID_OBJ);
		return (-1);
	}

	bzero(obj, sizeof (ea_object_t));
	obj->eo_type = EO_ITEM;
	obj->eo_catalog = tag;

	switch (obj->eo_catalog & EXT_TYPE_MASK) {
	case EXT_UINT8:
		item->ei_u.ei_u_uint8 = *(uint8_t *)value;
		item->ei_size = sizeof (uint8_t);
		break;
	case EXT_UINT16:
		item->ei_u.ei_u_uint16 = *(uint16_t *)value;
		item->ei_size = sizeof (uint16_t);
		break;
	case EXT_UINT32:
		item->ei_u.ei_u_uint32 = *(uint32_t *)value;
		item->ei_size = sizeof (uint32_t);
		break;
	case EXT_UINT64:
		item->ei_u.ei_u_uint64 = *(uint64_t *)value;
		item->ei_size = sizeof (uint64_t);
		break;
	case EXT_DOUBLE:
		item->ei_u.ei_u_double = *(double *)value;
		item->ei_size = sizeof (double);
		break;
	case EXT_STRING:
		if ((item->ei_string = ea_strdup((char *)value)) == NULL) {
			/* exacct_errno set above. */
			return (-1);
		}
		item->ei_size = strlen(item->ei_string) + 1;
		break;
	case EXT_EXACCT_OBJECT:
		if ((item->ei_object = ea_alloc(valsize)) == NULL) {
			/* exacct_errno set above. */
			return (-1);
		}
		bcopy(value, item->ei_object, valsize);
		item->ei_size = valsize;
		break;
	case EXT_RAW:
		if ((item->ei_raw = ea_alloc(valsize)) == NULL) {
			/* exacct_errno set above. */
			return (-1);
		}
		bcopy(value, item->ei_raw, valsize);
		item->ei_size = valsize;
		break;
	default:
		EXACCT_SET_ERR(EXR_INVALID_OBJ);
		return (-1);
	}

	EXACCT_SET_ERR(EXR_OK);
	return (0);
}

int
ea_set_group(ea_object_t *obj, ea_catalog_t tag)
{
	if ((tag & EXT_TYPE_MASK) != EXT_GROUP) {
		EXACCT_SET_ERR(EXR_INVALID_OBJ);
		return (-1);
	}

	bzero(obj, sizeof (ea_object_t));

	obj->eo_type = EO_GROUP;
	obj->eo_catalog = tag;
	obj->eo_u.eo_u_group.eg_nobjs = 0;
	obj->eo_u.eo_u_group.eg_objs = NULL;

	EXACCT_SET_ERR(EXR_OK);
	return (0);
}
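
/*
 * Example (an illustrative sketch; the guard macro, function name and
 * catalog tags below are hypothetical): initializing an item and a group in
 * place.  A real caller composes a tag from a type, a catalog and a data ID
 * defined in <sys/exacct_catalog.h>.
 */
#ifdef EXACCT_SET_EXAMPLE
static int
example_set_objects(ea_object_t *item, ea_object_t *grp)
{
	uint64_t val = 12345;

	/* The type bits must agree with the value being stored. */
	if (ea_set_item(item, EXT_UINT64 | EXC_DEFAULT | EXD_NONE,
	    &val, sizeof (val)) == -1)
		return (-1);

	/* ea_set_group() insists on an EXT_GROUP type tag. */
	return (ea_set_group(grp, EXT_GROUP | EXC_DEFAULT | EXD_NONE));
}
#endif	/* EXACCT_SET_EXAMPLE */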

void
ea_free_object(ea_object_t *obj, int flag)
{
	ea_object_t *next = obj;
	ea_object_t *save;

	while (next != NULL) {
		if (next->eo_type == EO_GROUP) {
			ea_free_object(next->eo_group.eg_objs, flag);
		} else if (next->eo_type == EO_ITEM) {
			switch (next->eo_catalog & EXT_TYPE_MASK) {
			case EXT_STRING:
				if (flag == EUP_ALLOC)
					ea_strfree(next->eo_item.ei_string);
				break;
			case EXT_RAW:
			case EXT_EXACCT_OBJECT:
				if (flag == EUP_ALLOC)
					ea_free(next->eo_item.ei_raw,
					    next->eo_item.ei_size);
				break;
			default:
				/* No action required for other types. */
				break;
			}
		}
		/* No action required for EO_NONE. */

		save = next;
		next = next->eo_next;
#ifdef _KERNEL
		kmem_cache_free(exacct_object_cache, save);
#else
		ea_free(save, sizeof (ea_object_t));
#endif /* _KERNEL */
	}
}

int
ea_free_item(ea_object_t *obj, int flag)
{
	if (obj->eo_type != EO_ITEM) {
		EXACCT_SET_ERR(EXR_INVALID_OBJ);
		return (-1);
	}

	switch (obj->eo_catalog & EXT_TYPE_MASK) {
	case EXT_STRING:
		if (flag == EUP_ALLOC)
			ea_strfree(obj->eo_item.ei_string);
		break;
	case EXT_RAW:
	case EXT_EXACCT_OBJECT:
		if (flag == EUP_ALLOC)
			ea_free(obj->eo_item.ei_raw, obj->eo_item.ei_size);
		break;
	default:
		/* No action required for other types. */
		break;
	}

	obj->eo_catalog = 0;
	obj->eo_type = EO_NONE;
	EXACCT_SET_ERR(EXR_OK);
	return (0);
}

static void
ea_attach_object(ea_object_t **objp, ea_object_t *obj)
{
	ea_object_t *tp;

	tp = *objp;
	*objp = obj;
	obj->eo_next = tp;
}

int
ea_attach_to_object(ea_object_t *root, ea_object_t *obj)
{
	if (obj->eo_type == EO_GROUP || obj->eo_type == EO_ITEM) {
		ea_attach_object(&root->eo_next, obj);
		EXACCT_SET_ERR(EXR_OK);
		return (0);
	} else {
		EXACCT_SET_ERR(EXR_INVALID_OBJ);
		return (-1);
	}
}

/*
 * ea_attach_to_group() takes a group object and an additional exacct object
 * and attaches the latter to the object list of the former.  The attached
 * exacct object can be the head of a chain of objects.  If group isn't
 * actually an object of type EO_GROUP, do nothing, so that we don't destroy
 * its contents.
 */
int
ea_attach_to_group(ea_object_t *group, ea_object_t *obj)
{
	uint_t n = 0;
	ea_object_t *next;
	ea_object_t **nextp;

	if (group->eo_type != EO_GROUP) {
		EXACCT_SET_ERR(EXR_INVALID_OBJ);
		return (-1);
	}

	for (next = obj; next != NULL; next = next->eo_next)
		n++;

	group->eo_group.eg_nobjs += n;

	for (nextp = &group->eo_group.eg_objs; *nextp != NULL;
	    nextp = &(*nextp)->eo_next)
		continue;

	ea_attach_object(nextp, obj);
	EXACCT_SET_ERR(EXR_OK);
	return (0);
}
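
/*
 * Example (an illustrative userland sketch; the guard macro and function
 * name are hypothetical, and the catalog tags are made-up): building a
 * one-item group from heap objects and tearing it down.  ea_free_object()
 * frees the ea_object_t structures themselves (in the kernel, back to
 * exacct_object_cache), so the objects handed to it must not live on the
 * stack; EUP_ALLOC additionally frees any variable-length payloads owned by
 * the items.
 */
#ifdef EXACCT_GROUP_EXAMPLE
static int
example_build_group(void)
{
	uint32_t val = 7;
	ea_object_t *grp, *item;

	if ((grp = ea_alloc(sizeof (ea_object_t))) == NULL)
		return (-1);
	if ((item = ea_alloc(sizeof (ea_object_t))) == NULL) {
		ea_free(grp, sizeof (ea_object_t));
		return (-1);
	}
	(void) ea_set_group(grp, EXT_GROUP | EXC_DEFAULT | EXD_NONE);
	(void) ea_set_item(item, EXT_UINT32 | EXC_DEFAULT | EXD_NONE,
	    &val, sizeof (val));
	(void) ea_attach_to_group(grp, item);

	ea_free_object(grp, EUP_ALLOC);	/* frees both grp and item */
	return (0);
}
#endif	/* EXACCT_GROUP_EXAMPLE */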

/*
 * ea_pack_object() takes the given exacct object series beginning with obj
 * and places it in buf.  Since ea_pack_object() needs to be runnable in
 * kernel context, we construct it to use its own stack of state.
 * Specifically, we store the locations of the sizes of open records
 * (records whose construction is in progress).  curr_frame is used to
 * indicate the current frame.  Just prior to decrementing curr_frame, we
 * must ensure that the correct size for that frame is placed in the given
 * offset.
 */
struct es_frame {
	ea_object_t	*esf_obj;
	ea_size_t	esf_size;
	ea_size_t	esf_bksize;
	ea_size_t	esf_offset;
};

static void
incr_parent_frames(struct es_frame *base, int n, size_t amt)
{
	int i;

	for (i = 0; i <= n; i++) {
		base[i].esf_size += amt;
		base[i].esf_bksize += amt;
	}
}

size_t
ea_pack_object(ea_object_t *obj, void *buf, size_t bufsize)
{
	struct es_frame *estack;
	uint_t neframes;
	ea_object_t *curr_obj = obj;
	int curr_frame = 0;
	size_t curr_pos = 0;
	ea_size_t placeholder = 0;
	int end_of_group = 0;
	uint32_t gp_backskip = sizeof (ea_catalog_t) + sizeof (ea_size_t) +
	    sizeof (uint32_t) + sizeof (uint32_t);
	uint32_t lge_backskip;

	exacct_order32(&gp_backskip);
	estack = ea_alloc(sizeof (struct es_frame) * DEFAULT_ENTRIES);
	if (estack == NULL) {
		/* exacct_errno set above. */
		return ((size_t)-1);
	}
	bzero(estack, sizeof (struct es_frame) * DEFAULT_ENTRIES);
	neframes = DEFAULT_ENTRIES;

	/*
	 * 1. Start with the current object.
	 */
	for (;;) {
		void *src;
		size_t size;

		/*
		 * 1a. If at the bottom of the stack, we are done.
		 * If at the end of a group, place the correct size at the
		 * head of the chain, the correct backskip amount in the next
		 * position in the buffer, and retreat to the previous frame.
		 */
		if (end_of_group) {
			if (--curr_frame < 0) {
				break;
			}

			exacct_order64(&estack[curr_frame].esf_size);
			ea_cond_memcpy_at_offset(buf,
			    estack[curr_frame].esf_offset, bufsize,
			    &estack[curr_frame].esf_size, sizeof (ea_size_t));
			exacct_order64(&estack[curr_frame].esf_size);

			/*
			 * Note that the large backskip is only 32 bits,
			 * whereas an object can be up to 2^64 bytes long.
			 * If an object is greater than 2^32 bytes long, set
			 * the large backskip to 0.  This will prevent the
			 * file being read backwards by causing EOF to be
			 * returned when the big object is encountered, but
			 * reading forwards will still be OK as it ignores
			 * the large backskip field.
			 */
			estack[curr_frame].esf_bksize += sizeof (uint32_t);

			lge_backskip =
			    estack[curr_frame].esf_bksize > UINT_MAX
			    ? 0 : (uint32_t)estack[curr_frame].esf_bksize;
			exacct_order32(&lge_backskip);
			ea_cond_memcpy_at_offset(buf, curr_pos, bufsize,
			    &lge_backskip, sizeof (lge_backskip));

			curr_pos += sizeof (uint32_t);
			incr_parent_frames(estack, curr_frame,
			    sizeof (uint32_t));

			if ((curr_obj = estack[curr_frame].esf_obj) != NULL) {
				end_of_group = 0;
				estack[curr_frame].esf_obj = NULL;
				estack[curr_frame].esf_size = 0;
				estack[curr_frame].esf_bksize = 0;
			} else {
				continue;
			}
		}

		/*
		 * 2. Write the catalog tag.
		 */
		exacct_order32(&curr_obj->eo_catalog);
		ea_cond_memcpy_at_offset(buf, curr_pos, bufsize,
		    &curr_obj->eo_catalog, sizeof (ea_catalog_t));
		exacct_order32(&curr_obj->eo_catalog);

		incr_parent_frames(estack, curr_frame, sizeof (ea_catalog_t));
		estack[curr_frame].esf_size -= sizeof (ea_catalog_t);
		curr_pos += sizeof (ea_catalog_t);
		estack[curr_frame].esf_offset = curr_pos;

		/*
		 * 2a. If this type is of variable size, reserve space for
		 * the size field.
		 */
		switch (curr_obj->eo_catalog & EXT_TYPE_MASK) {
		case EXT_GROUP:
		case EXT_STRING:
		case EXT_EXACCT_OBJECT:
		case EXT_RAW:
			exacct_order64(&placeholder);
			ea_cond_memcpy_at_offset(buf, curr_pos, bufsize,
			    &placeholder, sizeof (ea_size_t));
			exacct_order64(&placeholder);

			incr_parent_frames(estack, curr_frame,
			    sizeof (ea_size_t));
			estack[curr_frame].esf_size -= sizeof (ea_size_t);
			curr_pos += sizeof (ea_size_t);
			break;
		default:
			break;
		}

		if (curr_obj->eo_type == EO_GROUP) {
			/*
			 * 3A. If it's a group, put its next pointer, size,
			 * and size position on the stack, add 1 to the
			 * stack, set the current object to eg_objs, and
			 * goto 1.
			 */
			estack[curr_frame].esf_obj = curr_obj->eo_next;

			/*
			 * 3Aa. Insert the number of objects in the group.
			 */
			exacct_order32(&curr_obj->eo_group.eg_nobjs);
			ea_cond_memcpy_at_offset(buf, curr_pos, bufsize,
			    &curr_obj->eo_group.eg_nobjs,
			    sizeof (uint32_t));
			exacct_order32(&curr_obj->eo_group.eg_nobjs);

			incr_parent_frames(estack, curr_frame,
			    sizeof (uint32_t));
			curr_pos += sizeof (uint32_t);

			/*
			 * 3Ab. Insert a backskip of the appropriate size.
			 */
			ea_cond_memcpy_at_offset(buf, curr_pos, bufsize,
			    &gp_backskip, sizeof (uint32_t));

			incr_parent_frames(estack, curr_frame,
			    sizeof (uint32_t));
			curr_pos += sizeof (uint32_t);

			curr_frame++;

			if (curr_frame >= neframes) {
				/*
				 * Expand the eframe stack to handle the
				 * requested depth.
				 */
				uint_t new_neframes = 2 * neframes;
				struct es_frame *new_estack =
				    ea_alloc(new_neframes *
				    sizeof (struct es_frame));
				if (new_estack == NULL) {
					ea_free(estack, neframes *
					    sizeof (struct es_frame));
					/* exacct_errno set above. */
					return ((size_t)-1);
				}

				bzero(new_estack, new_neframes *
				    sizeof (struct es_frame));
				bcopy(estack, new_estack, neframes *
				    sizeof (struct es_frame));

				ea_free(estack, neframes *
				    sizeof (struct es_frame));
				estack = new_estack;
				neframes = new_neframes;
			} else {
				bzero(&estack[curr_frame],
				    sizeof (struct es_frame));
			}

			estack[curr_frame].esf_offset = curr_pos;
			if ((curr_obj = curr_obj->eo_group.eg_objs) == NULL) {
				end_of_group = 1;
			}

			continue;
		}
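
		/*
		 * At this point curr_obj is an item.  Its wire format,
		 * assembled across steps 2 through 4, is the catalog tag,
		 * then (for variable-length types only) a 64-bit size,
		 * then the payload copied below, and finally a 32-bit
		 * large backskip.
		 */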
		/*
		 * 3B. Otherwise we're considering an item: add its ei_size
		 * to all sizes on the stack, and copy its size into
		 * position.
		 */
		switch (curr_obj->eo_catalog & EXT_TYPE_MASK) {
		case EXT_UINT8:
			src = &curr_obj->eo_item.ei_uint8;
			size = sizeof (uint8_t);
			break;
		case EXT_UINT16:
			src = &curr_obj->eo_item.ei_uint16;
			size = sizeof (uint16_t);
			exacct_order16(src);
			break;
		case EXT_UINT32:
			src = &curr_obj->eo_item.ei_uint32;
			size = sizeof (uint32_t);
			exacct_order32(src);
			break;
		case EXT_UINT64:
			src = &curr_obj->eo_item.ei_uint64;
			size = sizeof (uint64_t);
			exacct_order64(src);
			break;
		case EXT_DOUBLE:
			src = &curr_obj->eo_item.ei_double;
			size = sizeof (double);
			exacct_order64((uint64_t *)src);
			break;
		case EXT_STRING:
			src = curr_obj->eo_item.ei_string;
			size = curr_obj->eo_item.ei_size;
			break;
		case EXT_EXACCT_OBJECT:
			src = curr_obj->eo_item.ei_object;
			size = curr_obj->eo_item.ei_size;
			break;
		case EXT_RAW:
			src = curr_obj->eo_item.ei_raw;
			size = curr_obj->eo_item.ei_size;
			break;
		case EXT_NONE:
		default:
			src = NULL;
			size = 0;
			break;
		}

		ea_cond_memcpy_at_offset(buf, curr_pos, bufsize, src, size);
		incr_parent_frames(estack, curr_frame, size);
		curr_pos += size;

		/*
		 * 4. Write the large backskip amount into the buffer.
		 * See above for note about why this may be set to 0.
		 */
		incr_parent_frames(estack, curr_frame, sizeof (uint32_t));

		lge_backskip = estack[curr_frame].esf_bksize > UINT_MAX
		    ? 0 : (uint32_t)estack[curr_frame].esf_bksize;
		exacct_order32(&lge_backskip);
		ea_cond_memcpy_at_offset(buf, curr_pos, bufsize,
		    &lge_backskip, sizeof (lge_backskip));

		curr_pos += sizeof (uint32_t);

		switch (curr_obj->eo_catalog & EXT_TYPE_MASK) {
		case EXT_RAW:
		case EXT_STRING:
		case EXT_EXACCT_OBJECT:
			exacct_order64(&estack[curr_frame].esf_size);
			ea_cond_memcpy_at_offset(buf,
			    estack[curr_frame].esf_offset, bufsize,
			    &estack[curr_frame].esf_size, sizeof (ea_size_t));
			exacct_order64(&estack[curr_frame].esf_size);
			break;
		case EXT_UINT16:
			exacct_order16(src);
			break;
		case EXT_UINT32:
			exacct_order32(src);
			break;
		case EXT_UINT64:
			exacct_order64(src);
			break;
		case EXT_DOUBLE:
			exacct_order64((uint64_t *)src);
			break;
		default:
			break;
		}

		/*
		 * 5. If eo_next is NULL, we are at the end of a group.  If
		 * not, move on to the next item on the list.
		 */
		if (curr_obj->eo_next == NULL) {
			end_of_group = 1;
		} else {
			curr_obj = curr_obj->eo_next;
			estack[curr_frame].esf_obj = NULL;
			estack[curr_frame].esf_size = 0;
			estack[curr_frame].esf_bksize = 0;
		}
	}

	ea_free(estack, neframes * sizeof (struct es_frame));
	EXACCT_SET_ERR(EXR_OK);
	return (curr_pos);
}
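
/*
 * Example (an illustrative sketch; the guard macro and function name are
 * hypothetical): the conditional copy in ea_cond_memcpy_at_offset() lets
 * ea_pack_object() be called with a NULL buffer to compute the required
 * size without writing anything, giving the usual two-pass packing idiom.
 */
#ifdef EXACCT_PACK_EXAMPLE
static void *
example_pack_twice(ea_object_t *obj, size_t *sizep)
{
	size_t sz;
	void *buf;

	/* First pass: no buffer, just measure. */
	if ((sz = ea_pack_object(obj, NULL, 0)) == (size_t)-1)
		return (NULL);
	if ((buf = ea_alloc(sz)) == NULL)
		return (NULL);
	/* Second pass: pack for real into the correctly sized buffer. */
	if (ea_pack_object(obj, buf, sz) == (size_t)-1) {
		ea_free(buf, sz);
		return (NULL);
	}
	*sizep = sz;
	return (buf);
}
#endif	/* EXACCT_PACK_EXAMPLE */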