///////////////////////////////////////////////////////////////////////////////
//
/// \file       index.c
/// \brief      Handling of .xz Indexes and some other Stream information
//
//  Author:     Lasse Collin
//
//  This file has been put into the public domain.
//  You can do whatever you want with this file.
//
///////////////////////////////////////////////////////////////////////////////

#include "index.h"
#include "stream_flags_common.h"


/// \brief      How many Records to allocate at once
///
/// This should be big enough to avoid making lots of tiny allocations
/// but small enough to avoid too much unused memory at once.
#define INDEX_GROUP_SIZE 512


/// \brief      How many Records can be allocated at once at maximum
#define PREALLOC_MAX ((SIZE_MAX - sizeof(index_group)) / sizeof(index_record))


/// \brief      Base structure for index_stream and index_group structures
typedef struct index_tree_node_s index_tree_node;
struct index_tree_node_s {
	/// Uncompressed start offset of this Stream (relative to the
	/// beginning of the file) or Block (relative to the beginning
	/// of the Stream)
	lzma_vli uncompressed_base;

	/// Compressed start offset of this Stream or Block
	lzma_vli compressed_base;

	index_tree_node *parent;
	index_tree_node *left;
	index_tree_node *right;
};


/// \brief      AVL tree to hold index_stream or index_group structures
typedef struct {
	/// Root node
	index_tree_node *root;

	/// Leftmost node. Since the tree will be filled sequentially,
	/// this won't change after the first node has been added to
	/// the tree.
	index_tree_node *leftmost;

	/// The rightmost node in the tree. Since the tree is filled
	/// sequentially, this is always the node to which the new data
	/// is added.
	index_tree_node *rightmost;

	/// Number of nodes in the tree
	uint32_t count;

} index_tree;


typedef struct {
	lzma_vli uncompressed_sum;
	lzma_vli unpadded_sum;
} index_record;


typedef struct {
	/// Every Record group is part of index_stream.groups tree.
	index_tree_node node;

	/// Number of Blocks in this Stream before this group.
	lzma_vli number_base;

	/// Number of Records that can be put in records[].
	size_t allocated;

	/// Index of the last Record in use.
	size_t last;

	/// The sizes in this array are stored as cumulative sums relative
	/// to the beginning of the Stream. This makes it possible to
	/// use binary search in lzma_index_locate().
	///
	/// Note that the cumulative summing is done specially for
	/// unpadded_sum: The previous value is rounded up to the next
	/// multiple of four before adding the Unpadded Size of the new
	/// Block. The total encoded size of the Blocks in the Stream
	/// is records[last].unpadded_sum in the last Record group of
	/// the Stream.
	///
	/// For example, if the Unpadded Sizes are 39, 57, and 81, the
	/// stored values are 39, 97 (40 + 57), and 181 (100 + 81).
	/// The total encoded size of these Blocks is 184.
	///
	/// This is a flexible array, because it makes it easy to optimize
	/// memory usage in case someone concatenates many Streams that
	/// have only one or few Blocks.
	index_record records[];

} index_group;
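

// Illustrative note (not used by the code itself): given the cumulative
// sums above, the sizes of a single Block n within a group can be
// recovered like this (for n == 0, the group's compressed_base and
// uncompressed_base take the place of the previous Record):
//
//     unpadded_size = records[n].unpadded_sum
//             - vli_ceil4(records[n - 1].unpadded_sum);
//     uncompressed_size = records[n].uncompressed_sum
//             - records[n - 1].uncompressed_sum;
//
// This is essentially what iter_set_info() below does when it fills
// lzma_index_iter.block.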


typedef struct {
	/// Every index_stream is a node in the tree of Streams.
	index_tree_node node;

	/// Number of this Stream (first one is 1)
	uint32_t number;

	/// Total number of Blocks before this Stream
	lzma_vli block_number_base;

	/// Record groups of this Stream are stored in a tree.
	/// It's a T-tree with AVL-tree balancing. There are
	/// INDEX_GROUP_SIZE Records per node by default.
	/// This keeps the number of memory allocations reasonable
	/// and finding a Record is fast.
	index_tree groups;

	/// Number of Records in this Stream
	lzma_vli record_count;

	/// Size of the List of Records field in this Stream. This is used
	/// together with record_count to calculate the size of the Index
	/// field and thus the total size of the Stream.
	lzma_vli index_list_size;

	/// Stream Flags of this Stream. This is meaningful only if
	/// the Stream Flags have been told us with lzma_index_stream_flags().
	/// Initially stream_flags.version is set to UINT32_MAX to indicate
	/// that the Stream Flags are unknown.
	lzma_stream_flags stream_flags;

	/// Amount of Stream Padding after this Stream. This defaults to
	/// zero and can be set with lzma_index_stream_padding().
	lzma_vli stream_padding;

} index_stream;


struct lzma_index_s {
	/// AVL-tree containing the Stream(s). Often there is just one
	/// Stream, but using a tree keeps lookups fast even when there
	/// are many concatenated Streams.
	index_tree streams;

	/// Uncompressed size of all the Blocks in the Stream(s)
	lzma_vli uncompressed_size;

	/// Total size of all the Blocks in the Stream(s)
	lzma_vli total_size;

	/// Total number of Records in all Streams in this lzma_index
	lzma_vli record_count;

	/// Size of the List of Records field if all the Streams in this
	/// lzma_index were packed into a single Stream (makes it simpler to
	/// take many .xz files and combine them into a single Stream).
	///
	/// This value together with record_count is needed to calculate
	/// Backward Size that is stored into Stream Footer.
	lzma_vli index_list_size;

	/// How many Records to allocate at once in lzma_index_append().
	/// This defaults to INDEX_GROUP_SIZE but can be overridden with
	/// lzma_index_prealloc().
	size_t prealloc;

	/// Bitmask indicating what integrity check types have been used
	/// as set by lzma_index_stream_flags(). The bit of the last Stream
	/// is not included here, since it is possible to change it by
	/// calling lzma_index_stream_flags() again.
	uint32_t checks;
};
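

// Overview of how the structures above relate to each other (a rough
// sketch for orientation only; the comments of each structure are the
// authoritative description):
//
//     lzma_index
//      `- index_tree streams          (AVL tree, one node per Stream)
//          `- index_stream
//              `- index_tree groups   (AVL tree, one node per Record group)
//                  `- index_group
//                      `- index_record records[]   (cumulative sums)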


static void
index_tree_init(index_tree *tree)
{
	tree->root = NULL;
	tree->leftmost = NULL;
	tree->rightmost = NULL;
	tree->count = 0;
	return;
}


/// Helper for index_tree_end()
static void
index_tree_node_end(index_tree_node *node, lzma_allocator *allocator,
		void (*free_func)(void *node, lzma_allocator *allocator))
{
	// The tree won't ever be very huge, so recursion should be fine.
	// 20 levels in the tree is likely quite a lot already in practice.
	if (node->left != NULL)
		index_tree_node_end(node->left, allocator, free_func);

	if (node->right != NULL)
		index_tree_node_end(node->right, allocator, free_func);

	if (free_func != NULL)
		free_func(node, allocator);

	lzma_free(node, allocator);
	return;
}


/// Free the memory allocated for a tree. If free_func is not NULL,
/// it is called on each node before freeing the node. This is used
/// to free the Record groups from each index_stream before freeing
/// the index_stream itself.
static void
index_tree_end(index_tree *tree, lzma_allocator *allocator,
		void (*free_func)(void *node, lzma_allocator *allocator))
{
	if (tree->root != NULL)
		index_tree_node_end(tree->root, allocator, free_func);

	return;
}


/// Add a new node to the tree. node->uncompressed_base and
/// node->compressed_base must have been set by the caller already.
static void
index_tree_append(index_tree *tree, index_tree_node *node)
{
	node->parent = tree->rightmost;
	node->left = NULL;
	node->right = NULL;

	++tree->count;

	// Handle the special case of adding the first node.
	if (tree->root == NULL) {
		tree->root = node;
		tree->leftmost = node;
		tree->rightmost = node;
		return;
	}

	// The tree is always filled sequentially.
	assert(tree->rightmost->uncompressed_base <= node->uncompressed_base);
	assert(tree->rightmost->compressed_base < node->compressed_base);

	// Add the new node after the rightmost node. It's the correct
	// place due to the reason above.
	tree->rightmost->right = node;
	tree->rightmost = node;

	// Balance the AVL-tree if needed. We don't need to keep the balance
	// factors in nodes, because we always fill the tree sequentially,
	// and thus know the state of the tree just by looking at the node
	// count. From the node count we can calculate how many steps to go
	// up in the tree to find the rotation root.
	uint32_t up = tree->count ^ (UINT32_C(1) << bsr32(tree->count));
	if (up != 0) {
		// Locate the root node for the rotation.
		up = ctz32(tree->count) + 2;
		do {
			node = node->parent;
		} while (--up > 0);

		// Rotate left using node as the rotation root.
		index_tree_node *pivot = node->right;

		if (node->parent == NULL) {
			tree->root = pivot;
		} else {
			assert(node->parent->right == node);
			node->parent->right = pivot;
		}

		pivot->parent = node->parent;

		node->right = pivot->left;
		if (node->right != NULL)
			node->right->parent = node;

		pivot->left = node;
		node->parent = pivot;
	}

	return;
}
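

// Illustrative example of the sequential filling and rotation logic above
// (not part of the code; nodes are named in insertion order A, B, C, ...):
//
//   - count = 2 (binary 10): A's right child is B. Since count is a power
//     of two, tree->count ^ (1 << bsr32(tree->count)) is zero and no
//     rotation is done.
//   - count = 3 (binary 11): up = ctz32(3) + 2 = 2, so the rotation root
//     is A. After the left rotation the tree is B with children A and C.
//   - count = 5 (binary 101): up = 2 again, the rotation root is C, giving
//     B with children A and D, where D has children C and E.
//   - count = 7: after the rotation the seven nodes form a perfect binary
//     tree with D as the root.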


/// Get the next node in the tree. Return NULL if there are no more nodes.
static void *
index_tree_next(const index_tree_node *node)
{
	if (node->right != NULL) {
		node = node->right;
		while (node->left != NULL)
			node = node->left;

		return (void *)(node);
	}

	while (node->parent != NULL && node->parent->right == node)
		node = node->parent;

	return (void *)(node->parent);
}


/// Locate a node that contains the given uncompressed offset. It is
/// the caller's job to check that target is not bigger than the
/// uncompressed size of the tree (in that case the last node would
/// still be returned).
static void *
index_tree_locate(const index_tree *tree, lzma_vli target)
{
	const index_tree_node *result = NULL;
	const index_tree_node *node = tree->root;

	assert(tree->leftmost == NULL
			|| tree->leftmost->uncompressed_base == 0);

	// Consecutive nodes may have the same uncompressed_base.
	// We must pick the rightmost one.
	while (node != NULL) {
		if (node->uncompressed_base > target) {
			node = node->left;
		} else {
			result = node;
			node = node->right;
		}
	}

	return (void *)(result);
}
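

// Illustrative example (not used by the code): if the nodes have
// uncompressed_base values 0, 100, 100, and 200, a target of 150 returns
// the rightmost of the two nodes whose base is 100. Consecutive equal
// bases occur when a Stream or Block is empty, and picking the rightmost
// node means the empty ones are skipped, since they occupy no
// uncompressed offsets.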


/// Allocate and initialize a new Stream using the given base offsets.
static index_stream *
index_stream_init(lzma_vli compressed_base, lzma_vli uncompressed_base,
		lzma_vli stream_number, lzma_vli block_number_base,
		lzma_allocator *allocator)
{
	index_stream *s = lzma_alloc(sizeof(index_stream), allocator);
	if (s == NULL)
		return NULL;

	s->node.uncompressed_base = uncompressed_base;
	s->node.compressed_base = compressed_base;
	s->node.parent = NULL;
	s->node.left = NULL;
	s->node.right = NULL;

	s->number = stream_number;
	s->block_number_base = block_number_base;

	index_tree_init(&s->groups);

	s->record_count = 0;
	s->index_list_size = 0;
	s->stream_flags.version = UINT32_MAX;
	s->stream_padding = 0;

	return s;
}


/// Free the memory allocated for a Stream and its Record groups.
static void
index_stream_end(void *node, lzma_allocator *allocator)
{
	index_stream *s = node;
	index_tree_end(&s->groups, allocator, NULL);
	return;
}


static lzma_index *
index_init_plain(lzma_allocator *allocator)
{
	lzma_index *i = lzma_alloc(sizeof(lzma_index), allocator);
	if (i != NULL) {
		index_tree_init(&i->streams);
		i->uncompressed_size = 0;
		i->total_size = 0;
		i->record_count = 0;
		i->index_list_size = 0;
		i->prealloc = INDEX_GROUP_SIZE;
		i->checks = 0;
	}

	return i;
}


extern LZMA_API(lzma_index *)
lzma_index_init(lzma_allocator *allocator)
{
	lzma_index *i = index_init_plain(allocator);
	if (i == NULL)
		return NULL;

	index_stream *s = index_stream_init(0, 0, 1, 0, allocator);
	if (s == NULL) {
		lzma_free(i, allocator);
		return NULL;
	}

	index_tree_append(&i->streams, &s->node);

	return i;
}


extern LZMA_API(void)
lzma_index_end(lzma_index *i, lzma_allocator *allocator)
{
	// NOTE: If you modify this function, check also the bottom
	// of lzma_index_cat().
	if (i != NULL) {
		index_tree_end(&i->streams, allocator, &index_stream_end);
		lzma_free(i, allocator);
	}

	return;
}
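

// Illustrative usage sketch (not part of liblzma; error handling and the
// actual Block encoding are omitted, and "block" is assumed to be a filled
// lzma_block structure): an encoder typically builds an Index with the
// functions above and lzma_index_append() below, roughly like this:
//
//     lzma_index *idx = lzma_index_init(NULL);
//
//     // After each encoded Block:
//     lzma_index_append(idx, NULL,
//                     lzma_block_unpadded_size(&block),
//                     block.uncompressed_size);
//
//     // When writing the Stream Footer, Backward Size is the value
//     // returned by lzma_index_size(idx).
//
//     lzma_index_end(idx, NULL);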


extern void
lzma_index_prealloc(lzma_index *i, lzma_vli records)
{
	if (records > PREALLOC_MAX)
		records = PREALLOC_MAX;

	i->prealloc = (size_t)(records);
	return;
}


extern LZMA_API(uint64_t)
lzma_index_memusage(lzma_vli streams, lzma_vli blocks)
{
	// This calculates an upper bound that is only a little bit
	// bigger than the exact maximum memory usage with the given
	// parameters.

	// Typical malloc() overhead is 2 * sizeof(void *) but we take
	// a little bit extra just in case. Using LZMA_MEMUSAGE_BASE
	// instead would give an estimate that is too inaccurate.
	const size_t alloc_overhead = 4 * sizeof(void *);

	// Amount of memory needed for each Stream's base structures.
	// We assume that every Stream has at least one Block and
	// thus at least one group.
	const size_t stream_base = sizeof(index_stream)
			+ sizeof(index_group) + 2 * alloc_overhead;

	// Amount of memory needed per group.
	const size_t group_base = sizeof(index_group)
			+ INDEX_GROUP_SIZE * sizeof(index_record)
			+ alloc_overhead;

	// Number of groups. There may actually be more, but that overhead
	// has been taken into account in stream_base already.
	const lzma_vli groups
			= (blocks + INDEX_GROUP_SIZE - 1) / INDEX_GROUP_SIZE;

	// Memory used by index_stream and index_group structures.
	const uint64_t streams_mem = streams * stream_base;
	const uint64_t groups_mem = groups * group_base;

	// Memory used by the base structure.
	const uint64_t index_base = sizeof(lzma_index) + alloc_overhead;

	// Validate the arguments and catch integer overflows.
	// Maximum number of Streams is "only" UINT32_MAX, because
	// that limit is used by the tree containing the Streams.
	const uint64_t limit = UINT64_MAX - index_base;
	if (streams == 0 || streams > UINT32_MAX || blocks > LZMA_VLI_MAX
			|| streams > limit / stream_base
			|| groups > limit / group_base
			|| limit - streams_mem < groups_mem)
		return UINT64_MAX;

	return index_base + streams_mem + groups_mem;
}


extern LZMA_API(uint64_t)
lzma_index_memused(const lzma_index *i)
{
	return lzma_index_memusage(i->streams.count, i->record_count);
}


extern LZMA_API(lzma_vli)
lzma_index_block_count(const lzma_index *i)
{
	return i->record_count;
}


extern LZMA_API(lzma_vli)
lzma_index_stream_count(const lzma_index *i)
{
	return i->streams.count;
}


extern LZMA_API(lzma_vli)
lzma_index_size(const lzma_index *i)
{
	return index_size(i->record_count, i->index_list_size);
}


extern LZMA_API(lzma_vli)
lzma_index_total_size(const lzma_index *i)
{
	return i->total_size;
}


extern LZMA_API(lzma_vli)
lzma_index_stream_size(const lzma_index *i)
{
	// Stream Header + Blocks + Index + Stream Footer
	return LZMA_STREAM_HEADER_SIZE + i->total_size
			+ index_size(i->record_count, i->index_list_size)
			+ LZMA_STREAM_HEADER_SIZE;
}


static lzma_vli
index_file_size(lzma_vli compressed_base, lzma_vli unpadded_sum,
		lzma_vli record_count, lzma_vli index_list_size,
		lzma_vli stream_padding)
{
	// Earlier Streams and Stream Paddings + Stream Header
	// + Blocks + Index + Stream Footer + Stream Padding
	//
	// This might go over LZMA_VLI_MAX due to a too big unpadded_sum
	// when this function is used in lzma_index_append().
	lzma_vli file_size = compressed_base + 2 * LZMA_STREAM_HEADER_SIZE
			+ stream_padding + vli_ceil4(unpadded_sum);
	if (file_size > LZMA_VLI_MAX)
		return LZMA_VLI_UNKNOWN;

	// The same applies here.
	file_size += index_size(record_count, index_list_size);
	if (file_size > LZMA_VLI_MAX)
		return LZMA_VLI_UNKNOWN;

	return file_size;
}


extern LZMA_API(lzma_vli)
lzma_index_file_size(const lzma_index *i)
{
	const index_stream *s = (const index_stream *)(i->streams.rightmost);
	const index_group *g = (const index_group *)(s->groups.rightmost);
	return index_file_size(s->node.compressed_base,
			g == NULL ? 0 : g->records[g->last].unpadded_sum,
			s->record_count, s->index_list_size,
			s->stream_padding);
}


extern LZMA_API(lzma_vli)
lzma_index_uncompressed_size(const lzma_index *i)
{
	return i->uncompressed_size;
}


extern LZMA_API(uint32_t)
lzma_index_checks(const lzma_index *i)
{
	uint32_t checks = i->checks;

	// Get the type of the Check of the last Stream too.
	const index_stream *s = (const index_stream *)(i->streams.rightmost);
	if (s->stream_flags.version != UINT32_MAX)
		checks |= UINT32_C(1) << s->stream_flags.check;

	return checks;
}
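

// Illustrative note (not used by the code): the value returned by
// lzma_index_checks() is a bitmask of lzma_check values. For example,
// if every Stream seen so far uses CRC64, the return value is
// UINT32_C(1) << LZMA_CHECK_CRC64, that is, 0x10.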


extern uint32_t
lzma_index_padding_size(const lzma_index *i)
{
	return (LZMA_VLI_C(4) - index_size_unpadded(
			i->record_count, i->index_list_size)) & 3;
}


extern LZMA_API(lzma_ret)
lzma_index_stream_flags(lzma_index *i, const lzma_stream_flags *stream_flags)
{
	if (i == NULL || stream_flags == NULL)
		return LZMA_PROG_ERROR;

	// Validate the Stream Flags.
	return_if_error(lzma_stream_flags_compare(
			stream_flags, stream_flags));

	index_stream *s = (index_stream *)(i->streams.rightmost);
	s->stream_flags = *stream_flags;

	return LZMA_OK;
}


extern LZMA_API(lzma_ret)
lzma_index_stream_padding(lzma_index *i, lzma_vli stream_padding)
{
	if (i == NULL || stream_padding > LZMA_VLI_MAX
			|| (stream_padding & 3) != 0)
		return LZMA_PROG_ERROR;

	index_stream *s = (index_stream *)(i->streams.rightmost);

	// Check that the new value won't make the file grow too big.
	const lzma_vli old_stream_padding = s->stream_padding;
	s->stream_padding = 0;
	if (lzma_index_file_size(i) + stream_padding > LZMA_VLI_MAX) {
		s->stream_padding = old_stream_padding;
		return LZMA_DATA_ERROR;
	}

	s->stream_padding = stream_padding;
	return LZMA_OK;
}


extern LZMA_API(lzma_ret)
lzma_index_append(lzma_index *i, lzma_allocator *allocator,
		lzma_vli unpadded_size, lzma_vli uncompressed_size)
{
	// Validate.
	if (i == NULL || unpadded_size < UNPADDED_SIZE_MIN
			|| unpadded_size > UNPADDED_SIZE_MAX
			|| uncompressed_size > LZMA_VLI_MAX)
		return LZMA_PROG_ERROR;

	index_stream *s = (index_stream *)(i->streams.rightmost);
	index_group *g = (index_group *)(s->groups.rightmost);

	const lzma_vli compressed_base = g == NULL ? 0
			: vli_ceil4(g->records[g->last].unpadded_sum);
	const lzma_vli uncompressed_base = g == NULL ? 0
			: g->records[g->last].uncompressed_sum;
	const uint32_t index_list_size_add = lzma_vli_size(unpadded_size)
			+ lzma_vli_size(uncompressed_size);

	// Check that the file size will stay within limits.
	if (index_file_size(s->node.compressed_base,
			compressed_base + unpadded_size, s->record_count + 1,
			s->index_list_size + index_list_size_add,
			s->stream_padding) == LZMA_VLI_UNKNOWN)
		return LZMA_DATA_ERROR;

	// The size of the Index field must not exceed the maximum value
	// that can be stored in the Backward Size field.
	if (index_size(i->record_count + 1,
			i->index_list_size + index_list_size_add)
			> LZMA_BACKWARD_SIZE_MAX)
		return LZMA_DATA_ERROR;

	if (g != NULL && g->last + 1 < g->allocated) {
		// There is space in the last group at least for one Record.
		++g->last;
	} else {
		// We need to allocate a new group.
		g = lzma_alloc(sizeof(index_group)
				+ i->prealloc * sizeof(index_record),
				allocator);
		if (g == NULL)
			return LZMA_MEM_ERROR;

		g->last = 0;
		g->allocated = i->prealloc;

		// Reset prealloc so that if the application happens to
		// add new Records, the allocation size will be sane.
		i->prealloc = INDEX_GROUP_SIZE;

		// Set the start offsets of this group.
		g->node.uncompressed_base = uncompressed_base;
		g->node.compressed_base = compressed_base;
		g->number_base = s->record_count + 1;

		// Add the new group to the Stream.
		index_tree_append(&s->groups, &g->node);
	}

	// Add the new Record to the group.
	g->records[g->last].uncompressed_sum
			= uncompressed_base + uncompressed_size;
	g->records[g->last].unpadded_sum
			= compressed_base + unpadded_size;

	// Update the totals.
	++s->record_count;
	s->index_list_size += index_list_size_add;

	i->total_size += vli_ceil4(unpadded_size);
	i->uncompressed_size += uncompressed_size;
	++i->record_count;
	i->index_list_size += index_list_size_add;

	return LZMA_OK;
}
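

// Illustrative note about the math above (not used by the code):
// lzma_vli_size() returns the number of bytes the value takes in the
// variable-length integer encoding used in the Index (seven bits per
// byte). For example, appending a Block with Unpadded Size 1234 (two
// bytes as a VLI) and Uncompressed Size 5000000 (four bytes as a VLI)
// grows the List of Records by index_list_size_add = 2 + 4 = 6 bytes.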


/// Structure to pass info to index_cat_helper()
typedef struct {
	/// Uncompressed size of the destination
	lzma_vli uncompressed_size;

	/// Compressed file size of the destination
	lzma_vli file_size;

	/// Same as above but for Block numbers
	lzma_vli block_number_add;

	/// Number of Streams that were in the destination index before we
	/// started appending new Streams from the source index. This is
	/// used to fix the Stream numbering.
	uint32_t stream_number_add;

	/// Destination index's Stream tree
	index_tree *streams;

} index_cat_info;


/// Add the Stream nodes from the source index to dest using recursion.
/// The simplest iterative traversal of the source tree wouldn't work,
/// because we update the pointers in nodes when moving them to the
/// destination tree.
static void
index_cat_helper(const index_cat_info *info, index_stream *this)
{
	index_stream *left = (index_stream *)(this->node.left);
	index_stream *right = (index_stream *)(this->node.right);

	if (left != NULL)
		index_cat_helper(info, left);

	this->node.uncompressed_base += info->uncompressed_size;
	this->node.compressed_base += info->file_size;
	this->number += info->stream_number_add;
	this->block_number_base += info->block_number_add;
	index_tree_append(info->streams, &this->node);

	if (right != NULL)
		index_cat_helper(info, right);

	return;
}


extern LZMA_API(lzma_ret)
lzma_index_cat(lzma_index *restrict dest, lzma_index *restrict src,
		lzma_allocator *allocator)
{
	const lzma_vli dest_file_size = lzma_index_file_size(dest);

	// Check that we don't exceed the file size limits.
	if (dest_file_size + lzma_index_file_size(src) > LZMA_VLI_MAX
			|| dest->uncompressed_size + src->uncompressed_size
				> LZMA_VLI_MAX)
		return LZMA_DATA_ERROR;

	// Check that the encoded size of the combined lzma_indexes stays
	// within limits. In theory, this should be done only if we know
	// that the user plans to actually combine the Streams and thus
	// construct a single Index (probably rare). However, exceeding
	// this limit is quite theoretical, so we do this check always
	// to simplify things elsewhere.
	{
		const lzma_vli dest_size = index_size_unpadded(
				dest->record_count, dest->index_list_size);
		const lzma_vli src_size = index_size_unpadded(
				src->record_count, src->index_list_size);
		if (vli_ceil4(dest_size + src_size) > LZMA_BACKWARD_SIZE_MAX)
			return LZMA_DATA_ERROR;
	}

	// Optimize the last group to minimize memory usage. Allocation has
	// to be done before modifying dest or src.
	{
		index_stream *s = (index_stream *)(dest->streams.rightmost);
		index_group *g = (index_group *)(s->groups.rightmost);
		if (g != NULL && g->last + 1 < g->allocated) {
			assert(g->node.left == NULL);
			assert(g->node.right == NULL);

			index_group *newg = lzma_alloc(sizeof(index_group)
					+ (g->last + 1)
					* sizeof(index_record),
					allocator);
			if (newg == NULL)
				return LZMA_MEM_ERROR;

			newg->node = g->node;
			newg->allocated = g->last + 1;
			newg->last = g->last;
			newg->number_base = g->number_base;

			memcpy(newg->records, g->records, newg->allocated
					* sizeof(index_record));

			if (g->node.parent != NULL) {
				assert(g->node.parent->right == &g->node);
				g->node.parent->right = &newg->node;
			}

			if (s->groups.leftmost == &g->node) {
				assert(s->groups.root == &g->node);
				s->groups.leftmost = &newg->node;
				s->groups.root = &newg->node;
			}

			if (s->groups.rightmost == &g->node)
				s->groups.rightmost = &newg->node;

			lzma_free(g, allocator);
		}
	}

	// Add all the Streams from src to dest. Update the base offsets
	// of each Stream from src.
	const index_cat_info info = {
		.uncompressed_size = dest->uncompressed_size,
		.file_size = dest_file_size,
		.stream_number_add = dest->streams.count,
		.block_number_add = dest->record_count,
		.streams = &dest->streams,
	};
	index_cat_helper(&info, (index_stream *)(src->streams.root));

	// Update info about all the combined Streams.
	dest->uncompressed_size += src->uncompressed_size;
	dest->total_size += src->total_size;
	dest->record_count += src->record_count;
	dest->index_list_size += src->index_list_size;
	dest->checks = lzma_index_checks(dest) | src->checks;

	// There's nothing else left in src than the base structure.
	lzma_free(src, allocator);

	return LZMA_OK;
}
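

// Illustrative usage sketch (not part of liblzma): combining the Indexes
// of two .xz files that will be concatenated. "first" and "second" are
// assumed to be lzma_index pointers built or decoded earlier; note that
// lzma_index_cat() frees "second" on success.
//
//     lzma_ret ret = lzma_index_cat(first, second, NULL);
//     if (ret == LZMA_OK) {
//             // "first" now describes both files; "second" is gone.
//     }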


/// Duplicate an index_stream.
static index_stream *
index_dup_stream(const index_stream *src, lzma_allocator *allocator)
{
	// Catch a somewhat theoretical integer overflow.
	if (src->record_count > PREALLOC_MAX)
		return NULL;

	// Allocate and initialize a new Stream.
	index_stream *dest = index_stream_init(src->node.compressed_base,
			src->node.uncompressed_base, src->number,
			src->block_number_base, allocator);

	// Return immediately if allocation failed or if there are
	// no groups to duplicate.
	if (dest == NULL || src->groups.leftmost == NULL)
		return dest;

	// Copy the overall information.
	dest->record_count = src->record_count;
	dest->index_list_size = src->index_list_size;
	dest->stream_flags = src->stream_flags;
	dest->stream_padding = src->stream_padding;

	// Allocate memory for the Records. We put all the Records into
	// a single group. It's simplest and also tends to make
	// lzma_index_locate() a little bit faster with very big Indexes.
	index_group *destg = lzma_alloc(sizeof(index_group)
			+ src->record_count * sizeof(index_record),
			allocator);
	if (destg == NULL) {
		// index_stream_end() frees only the groups, so the Stream
		// structure itself must be freed separately here.
		index_stream_end(dest, allocator);
		lzma_free(dest, allocator);
		return NULL;
	}

	// Initialize destg.
	destg->node.uncompressed_base = 0;
	destg->node.compressed_base = 0;
	destg->number_base = 1;
	destg->allocated = src->record_count;
	destg->last = src->record_count - 1;

	// Go through all the groups in src and copy the Records into destg.
	const index_group *srcg = (const index_group *)(src->groups.leftmost);
	size_t i = 0;
	do {
		memcpy(destg->records + i, srcg->records,
				(srcg->last + 1) * sizeof(index_record));
		i += srcg->last + 1;
		srcg = index_tree_next(&srcg->node);
	} while (srcg != NULL);

	assert(i == destg->allocated);

	// Add the group to the new Stream.
	index_tree_append(&dest->groups, &destg->node);

	return dest;
}


extern LZMA_API(lzma_index *)
lzma_index_dup(const lzma_index *src, lzma_allocator *allocator)
{
	// Allocate the base structure (no initial Stream).
	lzma_index *dest = index_init_plain(allocator);
	if (dest == NULL)
		return NULL;

	// Copy the totals.
	dest->uncompressed_size = src->uncompressed_size;
	dest->total_size = src->total_size;
	dest->record_count = src->record_count;
	dest->index_list_size = src->index_list_size;

	// Copy the Streams and the groups in them.
	const index_stream *srcstream
			= (const index_stream *)(src->streams.leftmost);
	do {
		index_stream *deststream = index_dup_stream(
				srcstream, allocator);
		if (deststream == NULL) {
			lzma_index_end(dest, allocator);
			return NULL;
		}

		index_tree_append(&dest->streams, &deststream->node);

		srcstream = index_tree_next(&srcstream->node);
	} while (srcstream != NULL);

	return dest;
}


/// Indexing for lzma_index_iter.internal[]
enum {
	ITER_INDEX,
	ITER_STREAM,
	ITER_GROUP,
	ITER_RECORD,
	ITER_METHOD,
};


/// Values for lzma_index_iter.internal[ITER_METHOD].s
enum {
	ITER_METHOD_NORMAL,
	ITER_METHOD_NEXT,
	ITER_METHOD_LEFTMOST,
};


static void
iter_set_info(lzma_index_iter *iter)
{
	const lzma_index *i = iter->internal[ITER_INDEX].p;
	const index_stream *stream = iter->internal[ITER_STREAM].p;
	const index_group *group = iter->internal[ITER_GROUP].p;
	const size_t record = iter->internal[ITER_RECORD].s;

	// lzma_index_iter.internal must not contain a pointer to the last
	// group in the index, because that may be reallocated by
	// lzma_index_cat().
	if (group == NULL) {
		// There are no groups.
		assert(stream->groups.root == NULL);
		iter->internal[ITER_METHOD].s = ITER_METHOD_LEFTMOST;

	} else if (i->streams.rightmost != &stream->node
			|| stream->groups.rightmost != &group->node) {
		// The group is not the last group in the index.
		iter->internal[ITER_METHOD].s = ITER_METHOD_NORMAL;

	} else if (stream->groups.leftmost != &group->node) {
		// The group isn't the only group in the Stream, thus we
		// know that it must have a parent group i.e. it's not
		// the root node.
		assert(stream->groups.root != &group->node);
		assert(group->node.parent->right == &group->node);
		iter->internal[ITER_METHOD].s = ITER_METHOD_NEXT;
		iter->internal[ITER_GROUP].p = group->node.parent;

	} else {
		// The Stream has only one group.
		assert(stream->groups.root == &group->node);
		assert(group->node.parent == NULL);
		iter->internal[ITER_METHOD].s = ITER_METHOD_LEFTMOST;
		iter->internal[ITER_GROUP].p = NULL;
	}

	iter->stream.number = stream->number;
	iter->stream.block_count = stream->record_count;
	iter->stream.compressed_offset = stream->node.compressed_base;
	iter->stream.uncompressed_offset = stream->node.uncompressed_base;

	// iter->stream.flags will be NULL if the Stream Flags haven't been
	// set with lzma_index_stream_flags().
	iter->stream.flags = stream->stream_flags.version == UINT32_MAX
			? NULL : &stream->stream_flags;
	iter->stream.padding = stream->stream_padding;

	if (stream->groups.rightmost == NULL) {
		// Stream has no Blocks.
		iter->stream.compressed_size = index_size(0, 0)
				+ 2 * LZMA_STREAM_HEADER_SIZE;
		iter->stream.uncompressed_size = 0;
	} else {
		const index_group *g = (const index_group *)(
				stream->groups.rightmost);

		// Stream Header + Stream Footer + Index + Blocks
		iter->stream.compressed_size = 2 * LZMA_STREAM_HEADER_SIZE
				+ index_size(stream->record_count,
					stream->index_list_size)
				+ vli_ceil4(g->records[g->last].unpadded_sum);
		iter->stream.uncompressed_size
				= g->records[g->last].uncompressed_sum;
	}

	if (group != NULL) {
		iter->block.number_in_stream = group->number_base + record;
		iter->block.number_in_file = iter->block.number_in_stream
				+ stream->block_number_base;

		iter->block.compressed_stream_offset
				= record == 0 ? group->node.compressed_base
				: vli_ceil4(group->records[
					record - 1].unpadded_sum);
		iter->block.uncompressed_stream_offset
				= record == 0 ? group->node.uncompressed_base
				: group->records[record - 1].uncompressed_sum;

		iter->block.uncompressed_size
				= group->records[record].uncompressed_sum
				- iter->block.uncompressed_stream_offset;
		iter->block.unpadded_size
				= group->records[record].unpadded_sum
				- iter->block.compressed_stream_offset;
		iter->block.total_size = vli_ceil4(iter->block.unpadded_size);

		iter->block.compressed_stream_offset
				+= LZMA_STREAM_HEADER_SIZE;

		iter->block.compressed_file_offset
				= iter->block.compressed_stream_offset
				+ iter->stream.compressed_offset;
		iter->block.uncompressed_file_offset
				= iter->block.uncompressed_stream_offset
				+ iter->stream.uncompressed_offset;
	}

	return;
}


extern LZMA_API(void)
lzma_index_iter_init(lzma_index_iter *iter, const lzma_index *i)
{
	iter->internal[ITER_INDEX].p = i;
	lzma_index_iter_rewind(iter);
	return;
}


extern LZMA_API(void)
lzma_index_iter_rewind(lzma_index_iter *iter)
{
	iter->internal[ITER_STREAM].p = NULL;
	iter->internal[ITER_GROUP].p = NULL;
	iter->internal[ITER_RECORD].s = 0;
	iter->internal[ITER_METHOD].s = ITER_METHOD_NORMAL;
	return;
}


extern LZMA_API(lzma_bool)
lzma_index_iter_next(lzma_index_iter *iter, lzma_index_iter_mode mode)
{
	// Catch unsupported mode values.
	if ((unsigned int)(mode) > LZMA_INDEX_ITER_NONEMPTY_BLOCK)
		return true;

	const lzma_index *i = iter->internal[ITER_INDEX].p;
	const index_stream *stream = iter->internal[ITER_STREAM].p;
	const index_group *group = NULL;
	size_t record = iter->internal[ITER_RECORD].s;

	// If we are being asked for the next Stream, leave group to NULL
	// so that the rest of this function thinks that this Stream
	// has no groups and will thus go to the next Stream.
	if (mode != LZMA_INDEX_ITER_STREAM) {
		// Get the pointer to the current group. See iter_set_info()
		// for explanation.
		switch (iter->internal[ITER_METHOD].s) {
		case ITER_METHOD_NORMAL:
			group = iter->internal[ITER_GROUP].p;
			break;

		case ITER_METHOD_NEXT:
			group = index_tree_next(iter->internal[ITER_GROUP].p);
			break;

		case ITER_METHOD_LEFTMOST:
			group = (const index_group *)(
					stream->groups.leftmost);
			break;
		}
	}

again:
	if (stream == NULL) {
		// We are at the beginning of the lzma_index.
		// Locate the first Stream.
		stream = (const index_stream *)(i->streams.leftmost);
		if (mode >= LZMA_INDEX_ITER_BLOCK) {
			// Since we are being asked to return information
			// about a Block, skip Streams that have no Blocks.
			while (stream->groups.leftmost == NULL) {
				stream = index_tree_next(&stream->node);
				if (stream == NULL)
					return true;
			}
		}

		// Start from the first Record in the Stream.
		group = (const index_group *)(stream->groups.leftmost);
		record = 0;

	} else if (group != NULL && record < group->last) {
		// The next Record is in the same group.
		++record;

	} else {
		// This group has no more Records or this Stream has
		// no Blocks at all.
		record = 0;

		// If group is not NULL, this Stream has at least one Block
		// and thus at least one group. Find the next group.
		if (group != NULL)
			group = index_tree_next(&group->node);

		if (group == NULL) {
			// This Stream has no more Records. Find the next
			// Stream. If we are being asked to return information
			// about a Block, we skip empty Streams.
			do {
				stream = index_tree_next(&stream->node);
				if (stream == NULL)
					return true;
			} while (mode >= LZMA_INDEX_ITER_BLOCK
					&& stream->groups.leftmost == NULL);

			group = (const index_group *)(
					stream->groups.leftmost);
		}
	}

	if (mode == LZMA_INDEX_ITER_NONEMPTY_BLOCK) {
		// We need to look for the next Block again if this Block
		// is empty.
		if (record == 0) {
			if (group->node.uncompressed_base
					== group->records[0].uncompressed_sum)
				goto again;
		} else if (group->records[record - 1].uncompressed_sum
				== group->records[record].uncompressed_sum) {
			goto again;
		}
	}

	iter->internal[ITER_STREAM].p = stream;
	iter->internal[ITER_GROUP].p = group;
	iter->internal[ITER_RECORD].s = record;

	iter_set_info(iter);

	return false;
}
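

// Illustrative usage sketch (not part of liblzma): iterating over every
// Block in an lzma_index "idx" that was built or decoded earlier.
// lzma_index_iter_next() returns false as long as another Block was found.
//
//     lzma_index_iter iter;
//     lzma_index_iter_init(&iter, idx);
//
//     while (!lzma_index_iter_next(&iter, LZMA_INDEX_ITER_BLOCK)) {
//             // iter.block.compressed_file_offset,
//             // iter.block.uncompressed_size, etc. describe the Block.
//     }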


extern LZMA_API(lzma_bool)
lzma_index_iter_locate(lzma_index_iter *iter, lzma_vli target)
{
	const lzma_index *i = iter->internal[ITER_INDEX].p;

	// If the target is past the end of the file, return immediately.
	if (i->uncompressed_size <= target)
		return true;

	// Locate the Stream containing the target offset.
	const index_stream *stream = index_tree_locate(&i->streams, target);
	assert(stream != NULL);
	target -= stream->node.uncompressed_base;

	// Locate the group containing the target offset.
	const index_group *group = index_tree_locate(&stream->groups, target);
	assert(group != NULL);

	// Use binary search to locate the exact Record. It is the first
	// Record whose uncompressed_sum is greater than target.
	// This is because we want the rightmost Record that fulfills the
	// search criterion. It is possible that there are empty Blocks;
	// we don't want to return them.
	size_t left = 0;
	size_t right = group->last;

	while (left < right) {
		const size_t pos = left + (right - left) / 2;
		if (group->records[pos].uncompressed_sum <= target)
			left = pos + 1;
		else
			right = pos;
	}

	iter->internal[ITER_STREAM].p = stream;
	iter->internal[ITER_GROUP].p = group;
	iter->internal[ITER_RECORD].s = left;

	iter_set_info(iter);

	return false;
}
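

// Illustrative note about the binary search above (not used by the code):
// if a group holds Records with uncompressed_sum values 10, 10, and 25,
// the Block at index 1 is empty. For target == 12 the search ends with
// left == 2, i.e. the Block covering uncompressed offsets 10..24, so the
// empty Block is skipped as intended.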