/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M% %I% %E% SMI"

#include <sys/systeminfo.h>

#include <exacct.h>
#include <exacct_impl.h>
#include <sys/exacct_impl.h>
#include <fcntl.h>
#include <unistd.h>
#include <strings.h>
#include <stdlib.h>
#include <stdio.h>
#include <errno.h>
#include <thread.h>
#include <pthread.h>

#define	EXACCT_HDR_STR	"exacct"
#define	EXACCT_HDR_LEN	7

#define	DEFAULT_ENTRIES	4
#define	SYSINFO_BUFSIZE	256

static thread_key_t	errkey = THR_ONCE_KEY;
static int		exacct_errval = 0;

/*
 * extended accounting file access routines
 *
 * exacct_ops.c implements the library-specific routines of libexacct: the
 * operations associated with file access and record traversal. (The
 * complementary routines which permit hierarchy building and record packing
 * are provided in exacct_core.c, which is used by both libexacct and the
 * kernel.) At its heart are the unpack, get, and next routines, which
 * navigate the packed records produced by ea_pack_object.
 */
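
/*
 * Illustrative sketch (kept out of the build with #if 0): a minimal consumer
 * of the public routines defined below. It opens an accounting file
 * read-only, validates the header, walks the top-level objects with
 * ea_get_object() and frees each unpacked item. The function name and the
 * path supplied by the caller are placeholders, not part of libexacct.
 */
#if 0
static int
example_dump(const char *path)
{
	ea_file_t ef;
	ea_object_t obj;
	int err;

	if (ea_open(&ef, path, NULL, EO_HEAD | EO_VALID_HDR,
	    O_RDONLY, 0) == -1)
		return (ea_error());

	for (;;) {
		bzero(&obj, sizeof (obj));
		if (ea_get_object(&ef, &obj) == EO_ERROR)
			break;
		if (obj.eo_type == EO_ITEM) {
			/* Examine obj.eo_catalog and obj.eo_item here. */
			(void) ea_free_item(&obj, EUP_ALLOC);
		}
		/* For EO_GROUP, the next eg_nobjs objects are its members. */
	}

	/* EXR_EOF is the expected termination condition. */
	err = (ea_error() == EXR_EOF) ? 0 : ea_error();
	(void) ea_close(&ef);
	return (err);
}
#endif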

/*
 * Group stack manipulation code. As groups can be nested, we need a mechanism
 * for saving and restoring the current position within the outer groups. This
 * state stack is stored within the ea_file_impl_t structure, in the ef_depth,
 * ef_ndeep and ef_mxdeep members. On error all these functions set
 * exacct_error and return -1.
 */

/*
 * If the stack is NULL, create and initialise it.
 * If it is not NULL, check it still has space - if not, double its size.
 */
static int
stack_check(ea_file_impl_t *f)
{
	if (f->ef_depth == NULL) {
		if ((f->ef_depth =
		    ea_alloc(DEFAULT_ENTRIES * sizeof (ea_file_depth_t)))
		    == NULL) {
			/* exacct_errno set above. */
			return (-1);
		}
		bzero(f->ef_depth, DEFAULT_ENTRIES * sizeof (ea_file_depth_t));
		f->ef_mxdeep = DEFAULT_ENTRIES;
		f->ef_ndeep = -1;
	} else if (f->ef_ndeep + 1 >= f->ef_mxdeep) {
		ea_file_depth_t *newstack;

		if ((newstack =
		    ea_alloc(f->ef_mxdeep * 2 * sizeof (ea_file_depth_t)))
		    == NULL) {
			/* exacct_errno set above. */
			return (-1);
		}
		bcopy(f->ef_depth, newstack,
		    f->ef_mxdeep * sizeof (ea_file_depth_t));
		bzero(newstack + f->ef_mxdeep,
		    f->ef_mxdeep * sizeof (ea_file_depth_t));
		ea_free(f->ef_depth, f->ef_mxdeep * sizeof (ea_file_depth_t));
		f->ef_mxdeep *= 2;
		f->ef_depth = newstack;
	}
	return (0);
}

/*
 * Free a stack.
 */
static void
stack_free(ea_file_impl_t *f)
{
	if (f->ef_depth != NULL) {
		ea_free(f->ef_depth, f->ef_mxdeep * sizeof (ea_file_depth_t));
		f->ef_depth = NULL;
	}
	f->ef_mxdeep = 0;
	f->ef_ndeep = -1;
}

/*
 * Add a new group onto the stack, pushing down one frame. nobjs is the number
 * of items in the group. We have to read this many objects before popping
 * back up to an enclosing group - see next_object() and previous_object()
 * below.
 */
static int
stack_new_group(ea_file_impl_t *f, int nobjs)
{
	if (stack_check(f) != 0) {
		stack_free(f);
		/* exacct_errno set above. */
		return (-1);
	}
	f->ef_ndeep++;
	f->ef_depth[f->ef_ndeep].efd_obj = 0;
	f->ef_depth[f->ef_ndeep].efd_nobjs = nobjs;
	return (0);
}

/*
 * Step forwards along the objects within the current group. If we are still
 * within a group, return 1. If we have reached the end of the current group,
 * unwind the stack back up to the nearest enclosing group that still has
 * unprocessed objects and return 0. On EOF or error, set exacct_error
 * accordingly and return -1. xread() is required so that this function can
 * work either on files or memory buffers.
 */
static int
stack_next_object(
    ea_file_impl_t *f,
    size_t (*xread)(ea_file_impl_t *, void *, size_t))
{
	uint32_t scratch32;

	/*
	 * If the stack is empty we are not in a group, so there will be no
	 * stack manipulation to do and no large backskips to step over.
	 */
	if (f->ef_ndeep < 0) {
		return (0);
	}

	/*
	 * Otherwise we must be in a group. If there are objects left in the
	 * group, move onto the next one in the group and return.
	 */
	if (++f->ef_depth[f->ef_ndeep].efd_obj <
	    f->ef_depth[f->ef_ndeep].efd_nobjs) {
		return (1);

	/*
	 * If we are at the end of a group we need to move backwards up the
	 * stack, consuming the large backskips as we go, until we find a group
	 * that still contains unprocessed items, or until we have unwound back
	 * off the bottom of the stack (i.e. out of all the groups).
	 */
	} else {
		while (f->ef_ndeep >= 0 &&
		    ++f->ef_depth[f->ef_ndeep].efd_obj >=
		    f->ef_depth[f->ef_ndeep].efd_nobjs) {
			/* Read the large backskip. */
			f->ef_ndeep--;
			if (xread(f, &scratch32, sizeof (scratch32)) !=
			    sizeof (scratch32)) {
				EXACCT_SET_ERR(EXR_CORRUPT_FILE);
				return (-1);
			}
		}
		return (0);
	}
}

/*
 * Step backwards along the objects within the current group. If we are still
 * within a group, return 1. If we have reached the end of the current group,
 * unwind the stack back up to the enclosing group and return 0.
 */
static int
stack_previous_object(ea_file_impl_t *f)
{
	/*
	 * If the stack is empty we are not in a group, so there will be no
	 * stack manipulation to do.
	 */
	if (f->ef_ndeep < 0) {
		return (0);
	}

	/*
	 * Otherwise we must be in a group. If there are objects left in the
	 * group, move onto the previous one in the group and return.
	 */
	if (--f->ef_depth[f->ef_ndeep].efd_obj >= 0) {
		return (1);

	/* Otherwise, step one level back up the group stack. */
	} else {
		f->ef_ndeep--;
		return (0);
	}
}

/*
 * read/seek/pos virtualisation wrappers. Because objects can come either from
 * a file or memory, the read/seek/pos functions need to be wrapped to allow
 * them to be used on either a file handle or a memory buffer.
 */

static size_t
fread_wrapper(ea_file_impl_t *f, void *buf, size_t sz)
{
	size_t retval;

	retval = fread(buf, 1, sz, f->ef_fp);
	if (retval == 0 && ferror(f->ef_fp)) {
		retval = (size_t)-1;
	}
	return (retval);
}

static size_t
bufread_wrapper(ea_file_impl_t *f, void *buf, size_t sz)
{
	if (f->ef_bufsize == 0 && sz != 0)
		return ((size_t)0);

	if (f->ef_bufsize < sz)
		sz = f->ef_bufsize;

	bcopy(f->ef_buf, buf, sz);
	f->ef_buf += sz;
	f->ef_bufsize -= sz;

	return (sz);
}

static off_t
fseek_wrapper(ea_file_impl_t *f, off_t adv)
{
	return (fseeko(f->ef_fp, adv, SEEK_CUR));
}

static off_t
bufseek_wrapper(ea_file_impl_t *f, off_t adv)
{
	if (f->ef_bufsize == 0 && adv != 0)
		return (-1);

	if (f->ef_bufsize < adv)
		adv = f->ef_bufsize;

	f->ef_buf += adv;
	f->ef_bufsize -= adv;

	return (0);
}

/*ARGSUSED*/
static void *
fpos_wrapper(ea_file_impl_t *f)
{
	return (NULL);
}

static void *
bufpos_wrapper(ea_file_impl_t *f)
{
	return (f->ef_buf);
}

/*
 * Public API
 */

void
exacct_seterr(int errval)
{
	if (thr_main()) {
		exacct_errval = errval;
		return;
	}
	(void) thr_keycreate_once(&errkey, 0);
	(void) thr_setspecific(errkey, (void *)(intptr_t)errval);
}

int
ea_error(void)
{
	if (thr_main())
		return (exacct_errval);
	if (errkey == THR_ONCE_KEY)
		return (EXR_OK);
	return ((int)(uintptr_t)pthread_getspecific(errkey));
}

/*
 * ea_next_object(), ea_previous_object(), and ea_get_object() are written such
 * that the file cursor is always located on an object boundary.
 */
ea_object_type_t
ea_next_object(ea_file_t *ef, ea_object_t *obj)
{
	ea_file_impl_t *f = (ea_file_impl_t *)ef;
	ea_size_t len;
	off_t backup;
	size_t ret;

	/*
	 * If ef_advance is zero, then we are executing after a get or previous
	 * operation and do not move to the next or previous object. Otherwise,
	 * advance to the next available item. Note that ef_advance does NOT
	 * include the large backskip at the end of an object, this being dealt
	 * with by the depth stack handling in stack_next_object.
	 */
	if (f->ef_advance != 0) {
		if (fseeko(f->ef_fp, (off_t)f->ef_advance, SEEK_CUR) == -1) {
			EXACCT_SET_ERR(EXR_SYSCALL_FAIL);
			return (EO_ERROR);
		}
		if (stack_next_object(f, fread_wrapper) == -1) {
			/* exacct_error set above. */
			return (EO_ERROR);
		}
	}
	f->ef_advance = 0;

	/* Read the catalog tag */
	ret = fread(&obj->eo_catalog, 1, sizeof (ea_catalog_t), f->ef_fp);
	if (ret == 0) {
		EXACCT_SET_ERR(EXR_EOF);
		return (EO_ERROR);
	} else if (ret < sizeof (ea_catalog_t)) {
		EXACCT_SET_ERR(EXR_CORRUPT_FILE);
		return (EO_ERROR);
	}
	exacct_order32(&obj->eo_catalog);

	backup = sizeof (ea_catalog_t);
	obj->eo_type = EO_ITEM;

	/* Figure out the offset to just before the large backskip. */
	switch (obj->eo_catalog & EXT_TYPE_MASK) {
	case EXT_GROUP:
		obj->eo_type = EO_GROUP;
		f->ef_advance = sizeof (uint32_t);
		/* FALLTHROUGH */
	case EXT_STRING:
	case EXT_EXACCT_OBJECT:
	case EXT_RAW:
		if (fread(&len, 1, sizeof (ea_size_t), f->ef_fp)
		    < sizeof (ea_size_t)) {
			obj->eo_type = EO_NONE;
			EXACCT_SET_ERR(EXR_CORRUPT_FILE);
			return (EO_ERROR);
		}
		exacct_order64(&len);
		/* Note: len already includes the size of the backskip. */
		f->ef_advance += sizeof (ea_catalog_t) +
		    sizeof (ea_size_t) + len;
		backup += sizeof (ea_size_t);
		break;
	case EXT_UINT8:
		f->ef_advance = sizeof (ea_catalog_t) + sizeof (uint8_t) +
		    sizeof (uint32_t);
		break;
	case EXT_UINT16:
		f->ef_advance = sizeof (ea_catalog_t) + sizeof (uint16_t) +
		    sizeof (uint32_t);
		break;
	case EXT_UINT32:
		f->ef_advance = sizeof (ea_catalog_t) + sizeof (uint32_t) +
		    sizeof (uint32_t);
		break;
	case EXT_UINT64:
		f->ef_advance = sizeof (ea_catalog_t) + sizeof (uint64_t) +
		    sizeof (uint32_t);
		break;
	case EXT_DOUBLE:
		f->ef_advance = sizeof (ea_catalog_t) + sizeof (double) +
		    sizeof (uint32_t);
		break;
	default:
		obj->eo_type = EO_NONE;
		EXACCT_SET_ERR(EXR_CORRUPT_FILE);
		return (EO_ERROR);
	}

	/* Reposition to the start of this object. */
	if (fseeko(f->ef_fp, -backup, SEEK_CUR) == -1) {
		obj->eo_type = EO_NONE;
		f->ef_advance = 0;
		EXACCT_SET_ERR(EXR_SYSCALL_FAIL);
		return (EO_ERROR);
	}

	EXACCT_SET_ERR(EXR_OK);
	return (obj->eo_type);
}

ea_object_type_t
ea_previous_object(ea_file_t *ef, ea_object_t *obj)
{
	ea_file_impl_t *f = (ea_file_impl_t *)ef;
	uint32_t bkskip;
	int r;

	if (fseeko(f->ef_fp, -((off_t)sizeof (uint32_t)), SEEK_CUR) == -1) {
		if (errno == EINVAL) {
			EXACCT_SET_ERR(EXR_EOF);
		} else {
			EXACCT_SET_ERR(EXR_SYSCALL_FAIL);
		}
		return (EO_ERROR);
	}

	if ((r = fread(&bkskip, 1, sizeof (uint32_t), f->ef_fp)) !=
	    sizeof (uint32_t)) {
		if (r == 0) {
			EXACCT_SET_ERR(EXR_EOF);
		} else {
			EXACCT_SET_ERR(EXR_SYSCALL_FAIL);
		}
		return (EO_ERROR);
	}
	exacct_order32(&bkskip);

	/*
	 * A backskip of 0 means that the current record can't be skipped over.
	 * This will be true for the header record, and for records longer than
	 * 2^32 bytes.
	 */
	if (bkskip == 0) {
		EXACCT_SET_ERR(EXR_EOF);
		return (EO_ERROR);
	}
	(void) stack_previous_object(f);

	if (fseeko(f->ef_fp, -((off_t)bkskip), SEEK_CUR) == -1) {
		if (errno == EINVAL) {
			/*
			 * If we attempted to seek past BOF, then the file was
			 * corrupt, as we can only trust the backskip we read.
			 */
			EXACCT_SET_ERR(EXR_CORRUPT_FILE);
		} else {
			EXACCT_SET_ERR(EXR_SYSCALL_FAIL);
		}
		return (EO_ERROR);
	}

	f->ef_advance = 0;
	return (ea_next_object(ef, obj));
}
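
/*
 * Illustrative sketch (kept out of the build with #if 0): using
 * ea_next_object() to scan a file by catalog tag alone. Each call reports
 * the type and tag of the next top-level object without unpacking it, and
 * the following call then skips over that object (for a group, over its
 * entire contents). The function name and the tag argument are placeholders.
 */
#if 0
static uint_t
example_count_tag(const char *path, ea_catalog_t tag)
{
	ea_file_t ef;
	ea_object_t obj;
	uint_t count = 0;

	if (ea_open(&ef, path, NULL, EO_HEAD | EO_VALID_HDR,
	    O_RDONLY, 0) == -1)
		return (0);

	while (ea_next_object(&ef, &obj) != EO_ERROR) {
		if (obj.eo_catalog == tag)
			count++;
	}

	(void) ea_close(&ef);
	return (count);
}
#endif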

/*
 * xget_object() contains the logic for extracting an individual object from a
 * packed buffer, which it consumes using xread() and xseek() operations
 * provided by the caller. flags may be set to either EUP_ALLOC, in which case
 * new memory is allocated for the variable length items unpacked, or
 * EUP_NOALLOC, in which case item data pointers indicate locations within the
 * buffer, using the provided xpos() function. EUP_NOALLOC is generally not
 * useful for callers that interact with actual file streams, and should not
 * be specified by them.
 */
static ea_object_type_t
xget_object(
    ea_file_impl_t *f,
    ea_object_t *obj,
    size_t (*xread)(ea_file_impl_t *, void *, size_t),
    off_t (*xseek)(ea_file_impl_t *, off_t),
    void *(*xpos)(ea_file_impl_t *),
    int flags)
{
	ea_size_t sz;
	uint32_t gp_backskip, scratch32;
	void *buf;
	size_t r;

	/* Read the catalog tag. */
	if ((r = xread(f, &obj->eo_catalog, sizeof (ea_catalog_t))) == 0) {
		EXACCT_SET_ERR(EXR_EOF);
		return (EO_ERROR);
	} else if (r != sizeof (ea_catalog_t)) {
		EXACCT_SET_ERR(EXR_CORRUPT_FILE);
		return (EO_ERROR);
	}
	exacct_order32(&obj->eo_catalog);

	/*
	 * If this is a record group, we treat it separately: only record
	 * groups cause us to allocate new depth frames.
	 */
	if ((obj->eo_catalog & EXT_TYPE_MASK) == EXT_GROUP) {
		obj->eo_type = EO_GROUP;

		/* Read size field, and number of objects. */
		if (xread(f, &sz, sizeof (ea_size_t)) != sizeof (ea_size_t)) {
			EXACCT_SET_ERR(EXR_CORRUPT_FILE);
			return (EO_ERROR);
		}
		exacct_order64(&sz);
		if (xread(f, &obj->eo_group.eg_nobjs, sizeof (uint32_t)) !=
		    sizeof (uint32_t)) {
			EXACCT_SET_ERR(EXR_CORRUPT_FILE);
			return (EO_ERROR);
		}
		exacct_order32(&obj->eo_group.eg_nobjs);

		/* Now read the group's small backskip. */
		if (xread(f, &gp_backskip, sizeof (uint32_t)) !=
		    sizeof (uint32_t)) {
			EXACCT_SET_ERR(EXR_CORRUPT_FILE);
			return (EO_ERROR);
		}

		/* Push a new depth stack frame. */
		if (stack_new_group(f, obj->eo_group.eg_nobjs) != 0) {
			/* exacct_error set above */
			return (EO_ERROR);
		}

		/*
		 * If the group has no items, we now need to position to the
		 * end of the group, because there will be no subsequent calls
		 * to process the group, it being empty.
		 */
		if (obj->eo_group.eg_nobjs == 0) {
			if (stack_next_object(f, xread) == -1) {
				/* exacct_error set above. */
				return (EO_ERROR);
			}
		}

		f->ef_advance = 0;
		EXACCT_SET_ERR(EXR_OK);
		return (obj->eo_type);
	}

	/*
	 * Otherwise we are reading an item.
	 */
	obj->eo_type = EO_ITEM;
	switch (obj->eo_catalog & EXT_TYPE_MASK) {
	case EXT_STRING:
	case EXT_EXACCT_OBJECT:
	case EXT_RAW:
		if (xread(f, &sz, sizeof (ea_size_t)) != sizeof (ea_size_t)) {
			EXACCT_SET_ERR(EXR_CORRUPT_FILE);
			return (EO_ERROR);
		}
		exacct_order64(&sz);
		/*
		 * Subtract backskip value from size.
		 */
		sz -= sizeof (uint32_t);
		if ((flags & EUP_ALLOC_MASK) == EUP_NOALLOC) {
			buf = xpos(f);
			if (xseek(f, sz) == -1) {
				EXACCT_SET_ERR(EXR_CORRUPT_FILE);
				return (EO_ERROR);
			}
		} else {
			if ((buf = ea_alloc(sz)) == NULL)
				/* exacct_error set above. */
				return (EO_ERROR);
			if (xread(f, buf, sz) != sz) {
				ea_free(buf, sz);
				EXACCT_SET_ERR(EXR_CORRUPT_FILE);
				return (EO_ERROR);
			}
		}
		obj->eo_item.ei_string = buf;
		/*
		 * Maintain our consistent convention that string lengths
		 * include the terminating NULL character.
		 */
		obj->eo_item.ei_size = sz;
		break;
	case EXT_UINT8:
		if (xread(f, &obj->eo_item.ei_uint8, sizeof (uint8_t)) !=
		    sizeof (uint8_t)) {
			EXACCT_SET_ERR(EXR_CORRUPT_FILE);
			return (EO_ERROR);
		}
		obj->eo_item.ei_size = sizeof (uint8_t);
		break;
	case EXT_UINT16:
		if (xread(f, &obj->eo_item.ei_uint16, sizeof (uint16_t)) !=
		    sizeof (uint16_t)) {
			EXACCT_SET_ERR(EXR_CORRUPT_FILE);
			return (EO_ERROR);
		}
		exacct_order16(&obj->eo_item.ei_uint16);
		obj->eo_item.ei_size = sizeof (uint16_t);
		break;
	case EXT_UINT32:
		if (xread(f, &obj->eo_item.ei_uint32, sizeof (uint32_t)) !=
		    sizeof (uint32_t)) {
			EXACCT_SET_ERR(EXR_CORRUPT_FILE);
			return (EO_ERROR);
		}
		exacct_order32(&obj->eo_item.ei_uint32);
		obj->eo_item.ei_size = sizeof (uint32_t);
		break;
	case EXT_UINT64:
		if (xread(f, &obj->eo_item.ei_uint64, sizeof (uint64_t)) !=
		    sizeof (uint64_t)) {
			EXACCT_SET_ERR(EXR_CORRUPT_FILE);
			return (EO_ERROR);
		}
		exacct_order64(&obj->eo_item.ei_uint64);
		obj->eo_item.ei_size = sizeof (uint64_t);
		break;
	case EXT_DOUBLE:
		if (xread(f, &obj->eo_item.ei_double, sizeof (double)) !=
		    sizeof (double)) {
			EXACCT_SET_ERR(EXR_CORRUPT_FILE);
			return (EO_ERROR);
		}
		exacct_order64((uint64_t *)&obj->eo_item.ei_double);
		obj->eo_item.ei_size = sizeof (double);
		break;
	default:
		/*
		 * We've encountered an unknown type value. Flag the error and
		 * exit.
		 */
		EXACCT_SET_ERR(EXR_CORRUPT_FILE);
		return (EO_ERROR);
	}

	/*
	 * Advance over current large backskip value,
	 * and position at the start of the next object.
	 */
	if (xread(f, &scratch32, sizeof (scratch32)) != sizeof (scratch32)) {
		EXACCT_SET_ERR(EXR_CORRUPT_FILE);
		return (EO_ERROR);
	}
	if (stack_next_object(f, xread) == -1) {
		/* exacct_error set above. */
		return (EO_ERROR);
	}

	f->ef_advance = 0;
	EXACCT_SET_ERR(EXR_OK);
	return (obj->eo_type);
}

ea_object_type_t
ea_get_object(ea_file_t *ef, ea_object_t *obj)
{
	obj->eo_next = NULL;
	return (xget_object((ea_file_impl_t *)ef, obj, fread_wrapper,
	    fseek_wrapper, fpos_wrapper, EUP_ALLOC));
}

/*
 * unpack_group() recursively unpacks record groups from the buffer tucked
 * within the passed ea_file, and attaches them to grp.
 */
static int
unpack_group(ea_file_impl_t *f, ea_object_t *grp, int flag)
{
	ea_object_t *obj;
	uint_t nobjs = grp->eo_group.eg_nobjs;
	int i;

	/*
	 * Set the group's object count to zero, as we will rebuild it via the
	 * individual object attachments.
	 */
	grp->eo_group.eg_nobjs = 0;
	grp->eo_group.eg_objs = NULL;

	for (i = 0; i < nobjs; i++) {
		if ((obj = ea_alloc(sizeof (ea_object_t))) == NULL) {
			/* exacct_errno set above. */
			return (-1);
		}
		obj->eo_next = NULL;
		if (xget_object(f, obj, bufread_wrapper, bufseek_wrapper,
		    bufpos_wrapper, flag) == -1) {
			ea_free(obj, sizeof (ea_object_t));
			/* exacct_errno set above. */
			return (-1);
		}

		(void) ea_attach_to_group(grp, obj);

		if (obj->eo_type == EO_GROUP &&
		    unpack_group(f, obj, flag) == -1) {
			/* exacct_errno set above. */
			return (-1);
		}
	}

	if (nobjs != grp->eo_group.eg_nobjs) {
		EXACCT_SET_ERR(EXR_CORRUPT_FILE);
		return (-1);
	}
	EXACCT_SET_ERR(EXR_OK);
	return (0);
}

/*
 * ea_unpack_object() can be considered as a finite series of get operations on
 * a given buffer that rebuilds the hierarchy of objects compacted by a pack
 * operation. Because there is complex state associated with the group depth,
 * ea_unpack_object() must complete as one operation on a given buffer.
 */
ea_object_type_t
ea_unpack_object(ea_object_t **objp, int flag, void *buf, size_t bufsize)
{
	ea_file_impl_t fake;
	ea_object_t *obj;
	ea_object_type_t first_obj_type;

	*objp = NULL;
	if (buf == NULL) {
		EXACCT_SET_ERR(EXR_INVALID_BUF);
		return (EO_ERROR);
	}

	/* Set up the structures needed for unpacking */
	bzero(&fake, sizeof (ea_file_impl_t));
	if (stack_check(&fake) == -1) {
		/* exacct_errno set above. */
		return (EO_ERROR);
	}
	fake.ef_buf = buf;
	fake.ef_bufsize = bufsize;

	/* Unpack the first object in the buffer - this should succeed. */
	if ((obj = ea_alloc(sizeof (ea_object_t))) == NULL) {
		stack_free(&fake);
		/* exacct_errno set above. */
		return (EO_ERROR);
	}
	obj->eo_next = NULL;
	if ((first_obj_type = xget_object(&fake, obj, bufread_wrapper,
	    bufseek_wrapper, bufpos_wrapper, flag)) == -1) {
		stack_free(&fake);
		ea_free(obj, sizeof (ea_object_t));
		/* exacct_errno set above. */
		return (EO_ERROR);
	}

	if (obj->eo_type == EO_GROUP && unpack_group(&fake, obj, flag) == -1) {
		stack_free(&fake);
		ea_free_object(obj, flag);
		/* exacct_errno set above. */
		return (EO_ERROR);
	}
	*objp = obj;

	/*
	 * There may be other objects in the buffer - if so, chain them onto
	 * the end of the list. We have reached the end of the list when
	 * xget_object() returns -1 with exacct_error set to EXR_EOF.
	 */
	for (;;) {
		if ((obj = ea_alloc(sizeof (ea_object_t))) == NULL) {
			stack_free(&fake);
			ea_free_object(*objp, flag);
			*objp = NULL;
			/* exacct_errno set above. */
			return (EO_ERROR);
		}
		obj->eo_next = NULL;
		if (xget_object(&fake, obj, bufread_wrapper, bufseek_wrapper,
		    bufpos_wrapper, flag) == -1) {
			stack_free(&fake);
			ea_free(obj, sizeof (ea_object_t));
			if (ea_error() == EXR_EOF) {
				EXACCT_SET_ERR(EXR_OK);
				return (first_obj_type);
			} else {
				ea_free_object(*objp, flag);
				*objp = NULL;
				/* exacct_error set above. */
				return (EO_ERROR);
			}
		}

		(void) ea_attach_to_object(*objp, obj);

		if (obj->eo_type == EO_GROUP &&
		    unpack_group(&fake, obj, flag) == -1) {
			stack_free(&fake);
			ea_free(obj, sizeof (ea_object_t));
			ea_free_object(*objp, flag);
			*objp = NULL;
			/* exacct_errno set above. */
			return (EO_ERROR);
		}
	}
}
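
/*
 * Illustrative sketch (kept out of the build with #if 0): a pack/unpack
 * round trip built from routines used elsewhere in this file. A one-item
 * group is packed into a freshly allocated buffer and then rebuilt with
 * ea_unpack_object(). The catalog tags are borrowed from the file header
 * purely for illustration; a real producer would use tags of its own.
 */
#if 0
static int
example_roundtrip(void)
{
	ea_object_t grp, item;
	ea_object_t *unpacked;
	uint32_t val = 1;
	void *buf;
	size_t sz;

	bzero(&grp, sizeof (grp));
	bzero(&item, sizeof (item));

	if (ea_set_item(&item, EXT_UINT32 | EXC_DEFAULT | EXD_VERSION,
	    &val, 0) == -1)
		return (-1);
	(void) ea_set_group(&grp, EXT_GROUP | EXC_DEFAULT | EXD_GROUP_HEADER);
	(void) ea_attach_to_group(&grp, &item);

	/* Pack with a null buffer to learn the required size. */
	sz = ea_pack_object(&grp, NULL, 0);
	if (sz == (size_t)-1 || (buf = ea_alloc(sz)) == NULL)
		return (-1);
	if (ea_pack_object(&grp, buf, sz) == (size_t)-1 ||
	    ea_unpack_object(&unpacked, EUP_ALLOC, buf, sz) == EO_ERROR) {
		ea_free(buf, sz);
		return (-1);
	}

	/* "unpacked" is now a copy of grp with item attached beneath it. */
	ea_free_object(unpacked, EUP_ALLOC);
	ea_free(buf, sz);
	return (0);
}
#endif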

int
ea_write_object(ea_file_t *ef, ea_object_t *obj)
{
	ea_size_t sz;
	void *buf;
	ea_file_impl_t *f = (ea_file_impl_t *)ef;

	/*
	 * If we weren't opened for writing, this call fails.
	 */
	if ((f->ef_oflags & O_RDWR) == 0 &&
	    (f->ef_oflags & O_WRONLY) == 0) {
		EXACCT_SET_ERR(EXR_NOTSUPP);
		return (-1);
	}

	/* Pack with a null buffer to get the size. */
	sz = ea_pack_object(obj, NULL, 0);
	if (sz == -1 || (buf = ea_alloc(sz)) == NULL) {
		/* exacct_error set above. */
		return (-1);
	}
	if (ea_pack_object(obj, buf, sz) == (size_t)-1) {
		ea_free(buf, sz);
		/* exacct_error set above. */
		return (-1);
	}
	if (fwrite(buf, sizeof (char), sz, f->ef_fp) != sz) {
		ea_free(buf, sz);
		EXACCT_SET_ERR(EXR_SYSCALL_FAIL);
		return (-1);
	}
	ea_free(buf, sz);
	EXACCT_SET_ERR(EXR_OK);
	return (0);
}
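
/*
 * Illustrative sketch (kept out of the build with #if 0): creating a new
 * accounting file and writing one record with ea_write_object(). The
 * creator string, path and record argument are placeholders; the record
 * would typically be a group built with ea_set_group()/ea_attach_to_group().
 * Note that O_CREAT truncates any existing file and writes a fresh header
 * (see ea_open() and write_header() below); to append to an existing file,
 * open it O_WRONLY or O_RDWR without O_CREAT instead.
 */
#if 0
static int
example_write_record(const char *path, ea_object_t *record)
{
	ea_file_t ef;

	if (ea_open(&ef, path, "example", EO_HEAD,
	    O_CREAT | O_WRONLY, 0644) == -1)
		return (-1);

	if (ea_write_object(&ef, record) == -1) {
		(void) ea_close(&ef);
		return (-1);
	}

	return (ea_close(&ef));
}
#endif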

/*
 * validate_header() must be kept in sync with write_header(), given below, and
 * exacct_create_header(), in uts/common/os/exacct.c.
 */
static int
validate_header(ea_file_t *ef, const char *creator)
{
	ea_object_t hdr_grp;
	ea_object_t scratch_obj;
	int error = EXR_OK;
	int saw_creator = 0;
	int saw_version = 0;
	int saw_type = 0;
	int saw_hostname = 0;
	int n;
	ea_file_impl_t *f = (ea_file_impl_t *)ef;

	bzero(&hdr_grp, sizeof (ea_object_t));

	if (ea_get_object(ef, &hdr_grp) != EO_GROUP) {
		error = ea_error();
		goto error_case;
	}

	if (hdr_grp.eo_catalog !=
	    (EXT_GROUP | EXC_DEFAULT | EXD_GROUP_HEADER)) {
		error = EXR_CORRUPT_FILE;
		goto error_case;
	}

	for (n = 0; n < hdr_grp.eo_group.eg_nobjs; n++) {
		bzero(&scratch_obj, sizeof (ea_object_t));
		if (ea_get_object(ef, &scratch_obj) == -1) {
			error = ea_error();
			goto error_case;
		}

		switch (scratch_obj.eo_catalog) {
		case EXT_UINT32 | EXC_DEFAULT | EXD_VERSION:
			if (scratch_obj.eo_item.ei_uint32 != EXACCT_VERSION) {
				error = EXR_UNKN_VERSION;
				goto error_case;
			}
			saw_version++;
			break;
		case EXT_STRING | EXC_DEFAULT | EXD_FILETYPE:
			if (strcmp(scratch_obj.eo_item.ei_string,
			    EXACCT_HDR_STR) != 0) {
				error = EXR_CORRUPT_FILE;
				goto error_case;
			}
			saw_type++;
			break;
		case EXT_STRING | EXC_DEFAULT | EXD_CREATOR:
			f->ef_creator =
			    ea_strdup(scratch_obj.eo_item.ei_string);
			if (f->ef_creator == NULL) {
				error = ea_error();
				goto error_case;
			}
			saw_creator++;
			break;
		/* The hostname is an optional field. */
		case EXT_STRING | EXC_DEFAULT | EXD_HOSTNAME:
			f->ef_hostname =
			    ea_strdup(scratch_obj.eo_item.ei_string);
			if (f->ef_hostname == NULL) {
				error = ea_error();
				goto error_case;
			}
			saw_hostname++;
			break;
		default:
			/* ignore unrecognized header members */
			break;
		}
		(void) ea_free_item(&scratch_obj, EUP_ALLOC);
	}

	if (saw_version && saw_type && saw_creator) {
		if (creator && strcmp(f->ef_creator, creator) != 0) {
			error = EXR_NO_CREATOR;
			goto error_case;
		}
		EXACCT_SET_ERR(EXR_OK);
		return (0);
	}

error_case:
	(void) ea_free_item(&scratch_obj, EUP_ALLOC);
	if (saw_hostname)
		ea_strfree(f->ef_hostname);
	if (saw_creator)
		ea_strfree(f->ef_creator);
	EXACCT_SET_ERR(error);
	return (-1);
}

static int
write_header(ea_file_t *ef)
{
	ea_object_t hdr_grp;
	ea_object_t vers_obj;
	ea_object_t creator_obj;
	ea_object_t filetype_obj;
	ea_object_t hostname_obj;
	uint32_t bskip;
	const uint32_t version = EXACCT_VERSION;
	ea_file_impl_t *f = (ea_file_impl_t *)ef;
	void *buf;
	size_t bufsize;
	char hostbuf[SYSINFO_BUFSIZE];
	int error = EXR_OK;

	bzero(&hdr_grp, sizeof (ea_object_t));
	bzero(&vers_obj, sizeof (ea_object_t));
	bzero(&creator_obj, sizeof (ea_object_t));
	bzero(&filetype_obj, sizeof (ea_object_t));
	bzero(&hostname_obj, sizeof (ea_object_t));
	bzero(hostbuf, SYSINFO_BUFSIZE);

	(void) sysinfo(SI_HOSTNAME, hostbuf, SYSINFO_BUFSIZE);

	if (ea_set_item(&vers_obj, EXT_UINT32 | EXC_DEFAULT | EXD_VERSION,
	    (void *)&version, 0) == -1 ||
	    ea_set_item(&creator_obj, EXT_STRING | EXC_DEFAULT | EXD_CREATOR,
	    f->ef_creator, strlen(f->ef_creator)) == -1 ||
	    ea_set_item(&filetype_obj, EXT_STRING | EXC_DEFAULT | EXD_FILETYPE,
	    EXACCT_HDR_STR, strlen(EXACCT_HDR_STR)) == -1 ||
	    ea_set_item(&hostname_obj, EXT_STRING | EXC_DEFAULT | EXD_HOSTNAME,
	    hostbuf, strlen(hostbuf)) == -1) {
		error = ea_error();
		goto cleanup1;
	}

	(void) ea_set_group(&hdr_grp,
	    EXT_GROUP | EXC_DEFAULT | EXD_GROUP_HEADER);
	(void) ea_attach_to_group(&hdr_grp, &vers_obj);
	(void) ea_attach_to_group(&hdr_grp, &creator_obj);
	(void) ea_attach_to_group(&hdr_grp, &filetype_obj);
	(void) ea_attach_to_group(&hdr_grp, &hostname_obj);

	/* Get the required size by passing a null buffer. */
	bufsize = ea_pack_object(&hdr_grp, NULL, 0);
	if ((buf = ea_alloc(bufsize)) == NULL) {
		error = ea_error();
		goto cleanup1;
	}

	if (ea_pack_object(&hdr_grp, buf, bufsize) == (size_t)-1) {
		error = ea_error();
		goto cleanup2;
	}

	/*
	 * To prevent reading the header when reading the file backwards,
	 * set the large backskip of the header group to 0 (last 4 bytes).
	 */
	bskip = 0;
	exacct_order32(&bskip);
	bcopy(&bskip, (char *)buf + bufsize - sizeof (bskip),
	    sizeof (bskip));

	if (fwrite(buf, sizeof (char), bufsize, f->ef_fp) != bufsize ||
	    fflush(f->ef_fp) == EOF) {
		error = EXR_SYSCALL_FAIL;
		goto cleanup2;
	}

cleanup2:
	ea_free(buf, bufsize);
cleanup1:
	(void) ea_free_item(&vers_obj, EUP_ALLOC);
	(void) ea_free_item(&creator_obj, EUP_ALLOC);
	(void) ea_free_item(&filetype_obj, EUP_ALLOC);
	(void) ea_free_item(&hostname_obj, EUP_ALLOC);
	EXACCT_SET_ERR(error);
	return (error == EXR_OK ? 0 : -1);
}

const char *
ea_get_creator(ea_file_t *ef)
{
	return ((const char *)((ea_file_impl_t *)ef)->ef_creator);
}

const char *
ea_get_hostname(ea_file_t *ef)
{
	return ((const char *)((ea_file_impl_t *)ef)->ef_hostname);
}

int
ea_fdopen(ea_file_t *ef, int fd, const char *creator, int aflags, int oflags)
{
	ea_file_impl_t *f = (ea_file_impl_t *)ef;

	bzero(f, sizeof (*f));
	f->ef_oflags = oflags;
	f->ef_fd = fd;

	/* Initialize depth stack. */
	if (stack_check(f) == -1) {
		/* exacct_error set above. */
		goto error1;
	}

	/*
	 * 1. If we are O_CREAT, then we will need to write a header
	 * after opening name.
	 */
	if (oflags & O_CREAT) {
		if (creator == NULL) {
			EXACCT_SET_ERR(EXR_NO_CREATOR);
			goto error2;
		}
		if ((f->ef_creator = ea_strdup(creator)) == NULL) {
			/* exacct_error set above. */
			goto error2;
		}
		if ((f->ef_fp = fdopen(f->ef_fd, "w")) == NULL) {
			EXACCT_SET_ERR(EXR_SYSCALL_FAIL);
			goto error3;
		}
		if (write_header(ef) == -1) {
			/* exacct_error set above. */
			goto error3;
		}

	/*
	 * 2. If we are not O_CREAT, but are RDWR or WRONLY, we need to
	 * seek to EOF so that appends will succeed.
	 */
	} else if (oflags & O_RDWR || oflags & O_WRONLY) {
		if ((f->ef_fp = fdopen(f->ef_fd, "r+")) == NULL) {
			EXACCT_SET_ERR(EXR_SYSCALL_FAIL);
			goto error2;
		}

		if ((aflags & EO_VALIDATE_MSK) == EO_VALID_HDR) {
			if (validate_header(ef, creator) < 0) {
				/* exacct_error set above. */
				goto error2;
			}
		}

		if (fseeko(f->ef_fp, 0, SEEK_END) == -1) {
			EXACCT_SET_ERR(EXR_SYSCALL_FAIL);
			goto error2;
		}

	/*
	 * 3. Any other combination of open flags is an unsupported way of
	 * opening an exacct file.
	 */
	} else if (oflags != O_RDONLY) {
		EXACCT_SET_ERR(EXR_NOTSUPP);
		goto error2;

	/*
	 * 4a. If we are RDONLY, then we are in a position such that
	 * either an ea_get_object or an ea_next_object will succeed. If
	 * aflags was set to EO_TAIL, seek to the end of the file.
	 */
	} else {
		if ((f->ef_fp = fdopen(f->ef_fd, "r")) == NULL) {
			EXACCT_SET_ERR(EXR_SYSCALL_FAIL);
			goto error2;
		}

		if ((aflags & EO_VALIDATE_MSK) == EO_VALID_HDR) {
			if (validate_header(ef, creator) == -1) {
				/* exacct_error set above. */
				goto error2;
			}
		}

		/*
		 * 4b. Handle the "open at end" option, for consumers who want
		 * to go backwards through the file (i.e. lastcomm).
		 */
		if ((aflags & EO_POSN_MSK) == EO_TAIL) {
			if (fseeko(f->ef_fp, 0, SEEK_END) < 0) {
				EXACCT_SET_ERR(EXR_SYSCALL_FAIL);
				goto error2;
			}
		}
	}

	EXACCT_SET_ERR(EXR_OK);
	return (0);

	/* Error cleanup code */
error3:
	ea_strfree(f->ef_creator);
error2:
	stack_free(f);
error1:
	bzero(f, sizeof (*f));
	return (-1);
}

int
ea_open(ea_file_t *ef, const char *name, const char *creator,
    int aflags, int oflags, mode_t mode)
{
	int fd;

	/*
	 * If overwriting an existing file, make sure to truncate it first
	 * so that stale contents cannot leave the new file corrupt.
	 */
	if (oflags & O_CREAT)
		oflags |= O_TRUNC;

	if ((fd = open(name, oflags, mode)) == -1) {
		EXACCT_SET_ERR(EXR_SYSCALL_FAIL);
		return (-1);
	}

	if (ea_fdopen(ef, fd, creator, aflags, oflags) == -1) {
		(void) close(fd);
		return (-1);
	}

	return (0);
}

/*
 * ea_close() performs all appropriate close operations on the open exacct
 * file, including releasing any memory allocated while parsing the file.
 */
int
ea_close(ea_file_t *ef)
{
	ea_file_impl_t *f = (ea_file_impl_t *)ef;

	if (f->ef_creator != NULL)
		ea_strfree(f->ef_creator);
	if (f->ef_hostname != NULL)
		ea_strfree(f->ef_hostname);

	ea_free(f->ef_depth, f->ef_mxdeep * sizeof (ea_file_depth_t));

	if (fclose(f->ef_fp)) {
		EXACCT_SET_ERR(EXR_SYSCALL_FAIL);
		return (-1);
	}

	EXACCT_SET_ERR(EXR_OK);
	return (0);
}

/*
 * Empty the input buffer and clear any EOF or error bits set on the
 * underlying FILE. This can be used by any library clients who wish to handle
 * files that are in motion or who wish to seek the underlying file descriptor.
 */
void
ea_clear(ea_file_t *ef)
{
	ea_file_impl_t *f = (ea_file_impl_t *)ef;

	(void) fflush(f->ef_fp);
	clearerr(f->ef_fp);
}
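
/*
 * Illustrative sketch (kept out of the build with #if 0): one way a consumer
 * might use ea_clear() to follow a file that is still being written. EXR_EOF
 * is treated as "no complete record yet"; the stream state is cleared and
 * the read retried after a pause. The helper name and the one-second poll
 * are placeholders, and a robust consumer would also bound the retries and
 * cope with partially written records.
 */
#if 0
static ea_object_type_t
example_get_object_retry(ea_file_t *ef, ea_object_t *obj)
{
	ea_object_type_t type;

	while ((type = ea_get_object(ef, obj)) == EO_ERROR &&
	    ea_error() == EXR_EOF) {
		ea_clear(ef);
		(void) sleep(1);
	}
	return (type);
}
#endif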
All the elements in the eo_next 1296 * list will be copied, and any group objects will be recursively copied. 1297 */ 1298 ea_object_t * 1299 ea_copy_object_tree(const ea_object_t *src) 1300 { 1301 ea_object_t *ret_obj, *dst, *last; 1302 1303 for (ret_obj = last = NULL; src != NULL; 1304 last = dst, src = src->eo_next) { 1305 1306 /* Allocate a new object and copy to it. */ 1307 if ((dst = ea_copy_object(src)) == NULL) { 1308 ea_free_object(ret_obj, EUP_ALLOC); 1309 return (NULL); 1310 } 1311 1312 /* Groups need the object list copying. */ 1313 if (src->eo_type == EO_GROUP) { 1314 dst->eo_group.eg_objs = 1315 ea_copy_object_tree(src->eo_group.eg_objs); 1316 if (dst->eo_group.eg_objs == NULL) { 1317 ea_free_object(ret_obj, EUP_ALLOC); 1318 return (NULL); 1319 } 1320 dst->eo_group.eg_nobjs = src->eo_group.eg_nobjs; 1321 } 1322 1323 /* Remember the list head the first time round. */ 1324 if (ret_obj == NULL) { 1325 ret_obj = dst; 1326 } 1327 1328 /* Link together if not at the list head. */ 1329 if (last != NULL) { 1330 last->eo_next = dst; 1331 } 1332 } 1333 EXACCT_SET_ERR(EXR_OK); 1334 return (ret_obj); 1335 } 1336 1337 /* 1338 * Read in the specified number of objects, returning the same data 1339 * structure that would have originally been passed to ea_write(). 1340 */ 1341 ea_object_t * 1342 ea_get_object_tree(ea_file_t *ef, uint32_t nobj) 1343 { 1344 ea_object_t *first_obj, *prev_obj, *obj; 1345 1346 first_obj = prev_obj = NULL; 1347 while (nobj--) { 1348 /* Allocate space for the new object. */ 1349 obj = ea_alloc(sizeof (ea_object_t)); 1350 bzero(obj, sizeof (*obj)); 1351 1352 /* Read it in. */ 1353 if (ea_get_object(ef, obj) == -1) { 1354 ea_free(obj, sizeof (ea_object_t)); 1355 if (first_obj != NULL) { 1356 ea_free_object(first_obj, EUP_ALLOC); 1357 } 1358 return (NULL); 1359 } 1360 1361 /* Link it into the list. */ 1362 if (first_obj == NULL) { 1363 first_obj = obj; 1364 } 1365 if (prev_obj != NULL) { 1366 prev_obj->eo_next = obj; 1367 } 1368 prev_obj = obj; 1369 1370 /* Recurse if the object is a group with contents. */ 1371 if (obj->eo_type == EO_GROUP && obj->eo_group.eg_nobjs > 0) { 1372 if ((obj->eo_group.eg_objs = ea_get_object_tree(ef, 1373 obj->eo_group.eg_nobjs)) == NULL) { 1374 /* exacct_error set above. */ 1375 ea_free_object(first_obj, EUP_ALLOC); 1376 return (NULL); 1377 } 1378 } 1379 } 1380 EXACCT_SET_ERR(EXR_OK); 1381 return (first_obj); 1382 } 1383